/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
        "VERDE",
        "OLAND",
        "HAINAN",
        "BONAIRE",
        "KAVERI",
        "KABINI",
        "HAWAII",
        "MULLINS",
        "TOPAZ",
        "TONGA",
        "FIJI",
        "CARRIZO",
        "STONEY",
        "POLARIS10",
        "POLARIS11",
        "POLARIS12",
        "VEGA10",
        "VEGA12",
        "RAVEN",
        "LAST",
};

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * amdgpu_device_is_px - Is the device a dGPU with HG/PX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with HG/PX power control,
 * otherwise return false.
 */
bool amdgpu_device_is_px(struct drm_device *dev)
{
        struct amdgpu_device *adev = dev->dev_private;

        if (adev->flags & AMD_IS_PX)
                return true;
        return false;
}

/*
 * MMIO register access helper functions.
 */
/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        uint32_t acc_flags)
{
        uint32_t ret;

        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_virt_kiq_rreg(adev, reg);

        if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        }
        trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
        return ret;
}

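/*
 * Usage note (illustrative sketch, not part of the original file): most
 * driver code does not call amdgpu_mm_rreg()/amdgpu_mm_wreg() directly,
 * but goes through the RREG32()/WREG32() convenience macros from amdgpu.h.
 * A typical read-modify-write looks roughly like this (the register and
 * bit names here are hypothetical):
 *
 *	u32 tmp;
 *
 *	tmp = RREG32(mmEXAMPLE_CNTL);
 *	tmp |= EXAMPLE_CNTL__ENABLE_MASK;
 *	WREG32(mmEXAMPLE_CNTL, tmp);
 */
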
/*
 * MMIO register read with bytes helper functions
 * @offset: bytes offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
        if (offset < adev->rmmio_size)
                return (readb(adev->rmmio + offset));
        BUG();
}

/*
 * MMIO register write with bytes helper functions
 * @offset: bytes offset from MMIO start
 * @value: the value to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
        if (offset < adev->rmmio_size)
                writeb(value, adev->rmmio + offset);
        else
                BUG();
}

/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    uint32_t acc_flags)
{
        trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

        if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
                adev->last_mm_index = v;
        }

        if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_virt_kiq_wreg(adev, reg, v);

        if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;

                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
        }

        if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
                udelay(500);
        }
}

/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified.
 */
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
        if ((reg * 4) < adev->rio_mem_size)
                return ioread32(adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                return ioread32(adev->rio_mem + (mmMM_DATA * 4));
        }
}

/**
 * amdgpu_io_wreg - write to an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
                adev->last_mm_index = v;
        }

        if ((reg * 4) < adev->rio_mem_size)
                iowrite32(v, adev->rio_mem + (reg * 4));
        else {
                iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
                iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
        }

        if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
                udelay(500);
        }
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
        if (index < adev->doorbell.num_doorbells) {
                return readl(adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
        if (index < adev->doorbell.num_doorbells) {
                writel(v, adev->doorbell.ptr + index);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
        if (index < adev->doorbell.num_doorbells) {
                return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
        } else {
                DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
                return 0;
        }
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
        if (index < adev->doorbell.num_doorbells) {
                atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
        } else {
                DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
        }
}

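/*
 * Usage note (illustrative sketch): ring code typically goes through the
 * WDOORBELL32()/WDOORBELL64() wrapper macros from amdgpu.h rather than
 * calling these helpers directly. A rough sketch of kicking the hardware
 * after advancing a ring's write pointer (the field names follow the ring
 * code and should be treated as an assumption here):
 *
 *	if (ring->use_doorbell)
 *		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 */
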
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
        BUG();
        return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
                  reg, v);
        BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
                                          uint32_t block, uint32_t reg)
{
        DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
                  reg, block);
        BUG();
        return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
                                      uint32_t block,
                                      uint32_t reg, uint32_t v)
{
        DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
                  reg, block, v);
        BUG();
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
        return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                       &adev->vram_scratch.robj,
                                       &adev->vram_scratch.gpu_addr,
                                       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size)
{
        u32 tmp, reg, and_mask, or_mask;
        int i;

        if (array_size % 3)
                return;

        for (i = 0; i < array_size; i += 3) {
                reg = registers[i + 0];
                and_mask = registers[i + 1];
                or_mask = registers[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
                        tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}

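/*
 * Illustrative sketch of how a golden register table is laid out and
 * applied (the register names and values here are hypothetical): the
 * table is a flat array of {offset, and_mask, or_mask} triples, so
 * array_size must be a multiple of 3.
 *
 *	static const u32 example_golden_settings[] = {
 *		mmEXAMPLE_REG_A, 0x0000ffff, 0x00000042, // RMW: clear low 16 bits, OR in 0x42
 *		mmEXAMPLE_REG_B, 0xffffffff, 0x00000001, // and_mask 0xffffffff: value written directly
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */
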
/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
        pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
        /* No doorbell on SI hardware generation */
        if (adev->asic_type < CHIP_BONAIRE) {
                adev->doorbell.base = 0;
                adev->doorbell.size = 0;
                adev->doorbell.num_doorbells = 0;
                adev->doorbell.ptr = NULL;
                return 0;
        }

        if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
                return -EINVAL;

        /* doorbell bar mapping */
        adev->doorbell.base = pci_resource_start(adev->pdev, 2);
        adev->doorbell.size = pci_resource_len(adev->pdev, 2);

        adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
                                             AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
        if (adev->doorbell.num_doorbells == 0)
                return -EINVAL;

        adev->doorbell.ptr = ioremap(adev->doorbell.base,
                                     adev->doorbell.num_doorbells *
                                     sizeof(u32));
        if (adev->doorbell.ptr == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
        iounmap(adev->doorbell.ptr);
        adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
        if (adev->wb.wb_obj) {
                amdgpu_bo_free_kernel(&adev->wb.wb_obj,
                                      &adev->wb.gpu_addr,
                                      (void **)&adev->wb.wb);
                adev->wb.wb_obj = NULL;
        }
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->wb.wb_obj == NULL) {
                /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
                r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
                                            PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                            &adev->wb.wb_obj, &adev->wb.gpu_addr,
                                            (void **)&adev->wb.wb);
                if (r) {
                        dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }

                adev->wb.num_wb = AMDGPU_MAX_WB;
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));

                /* clear wb memory */
                memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
        }

        return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
        unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
                *wb = offset << 3; /* convert to dw offset */
                return 0;
        } else {
                return -EINVAL;
        }
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
        wb >>= 3;
        if (wb < adev->wb.num_wb)
                __clear_bit(wb, adev->wb.used);
}

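/*
 * Illustrative usage sketch (modeled on how the ring code uses these
 * helpers; the exact surrounding code is an assumption): a caller reserves
 * a slot, derives CPU and GPU addresses from the returned dword offset,
 * and frees the slot on teardown.
 *
 *	u32 wb;
 *	int r;
 *
 *	r = amdgpu_device_wb_get(adev, &wb);
 *	if (r)
 *		return r;
 *	// wb is a dword offset into the writeback page:
 *	cpu_ptr  = &adev->wb.wb[wb];
 *	gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *	...
 *	amdgpu_device_wb_free(adev, wb);
 */
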
/**
 * amdgpu_device_vram_location - try to find VRAM location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter.
 */
void amdgpu_device_vram_location(struct amdgpu_device *adev,
                                 struct amdgpu_gmc *mc, u64 base)
{
        uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

        mc->vram_start = base;
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
        if (limit && limit < mc->real_vram_size)
                mc->real_vram_size = limit;
        dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
                 mc->mc_vram_size >> 20, mc->vram_start,
                 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_device_gart_location - try to find GTT location
 *
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place the GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_device_gart_location(struct amdgpu_device *adev,
                                 struct amdgpu_gmc *mc)
{
        u64 size_af, size_bf;

        mc->gart_size += adev->pm.smu_prv_buffer_size;

        size_af = adev->gmc.mc_mask - mc->vram_end;
        size_bf = mc->vram_start;
        if (size_bf > size_af) {
                if (mc->gart_size > size_bf) {
                        dev_warn(adev->dev, "limiting GTT\n");
                        mc->gart_size = size_bf;
                }
                mc->gart_start = 0;
        } else {
                if (mc->gart_size > size_af) {
                        dev_warn(adev->dev, "limiting GTT\n");
                        mc->gart_size = size_af;
                }
                /* VCE doesn't like it when BOs cross a 4GB segment, so align
                 * the GART base on a 4GB boundary as well.
                 */
                mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
        }
        mc->gart_end = mc->gart_start + mc->gart_size - 1;
        dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
                 mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}

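/*
 * Worked example (illustrative numbers, not from the original file): with
 * 8GB of VRAM placed at address 0, size_bf = 0 and size_af covers
 * everything between the VRAM end and mc_mask, so the GART window lands
 * just above VRAM, aligned up to a 4GB boundary:
 *
 *	vram_start = 0x0000000000000000
 *	vram_end   = 0x00000001ffffffff   (8GB - 1)
 *	gart_start = 0x0000000200000000   (ALIGN(vram_end + 1, 4GB))
 *	gart_end   = gart_start + gart_size - 1
 */
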
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the size we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
        u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
        u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
        struct pci_bus *root;
        struct resource *res;
        unsigned i;
        u16 cmd;
        int r;

        /* Bypass for VF */
        if (amdgpu_sriov_vf(adev))
                return 0;

        /* Check if the root BUS has 64bit memory resources */
        root = adev->pdev->bus;
        while (root->parent)
                root = root->parent;

        pci_bus_for_each_resource(root, res, i) {
                if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
                    res->start > 0x100000000ull)
                        break;
        }

        /* Trying to resize is pointless without a root hub window above 4GB */
        if (!res)
                return 0;

        /* Disable memory decoding while we change the BAR addresses and size */
        pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(adev->pdev, PCI_COMMAND,
                              cmd & ~PCI_COMMAND_MEMORY);

        /* Free the VRAM and doorbell BAR, we most likely need to move both. */
        amdgpu_device_doorbell_fini(adev);
        if (adev->asic_type >= CHIP_BONAIRE)
                pci_release_resource(adev->pdev, 2);

        pci_release_resource(adev->pdev, 0);

        r = pci_resize_resource(adev->pdev, 0, rbar_size);
        if (r == -ENOSPC)
                DRM_INFO("Not enough PCI address space for a large BAR.");
        else if (r && r != -ENOTSUPP)
                DRM_ERROR("Problem resizing BAR0 (%d).", r);

        pci_assign_unassigned_bus_resources(adev->pdev->bus);

        /* When the doorbell or fb BAR isn't available we have no chance of
         * using the device.
         */
        r = amdgpu_device_doorbell_init(adev);
        if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
                return -ENODEV;

        pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

        return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check whether the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or whether a post is needed after a hw reset.
 * Returns true if post is needed, false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
        uint32_t reg;

        if (amdgpu_sriov_vf(adev))
                return false;

        if (amdgpu_passthrough(adev)) {
                /* for FIJI: in the whole-GPU pass-through virtualization case,
                 * after a VM reboot some old SMC firmware still needs the driver
                 * to do a vPost, otherwise the GPU hangs. SMC firmware above
                 * version 22.15 doesn't have this flaw, so force vPost only for
                 * SMC versions below 22.15.
                 */
                if (adev->asic_type == CHIP_FIJI) {
                        int err;
                        uint32_t fw_ver;

                        err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
                        /* force vPost if error occurred */
                        if (err)
                                return true;

                        fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
                        if (fw_ver < 0x00160e00)
                                return true;
                }
        }

        if (adev->has_hw_reset) {
                adev->has_hw_reset = false;
                return true;
        }

        /* bios scratch used on CIK+ */
        if (adev->asic_type >= CHIP_BONAIRE)
                return amdgpu_atombios_scratch_need_asic_init(adev);

        /* check MEM_SIZE for older asics */
        reg = amdgpu_asic_get_config_memsize(adev);

        if ((reg != 0) && (reg != 0xffffffff))
                return false;

        return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
        struct amdgpu_device *adev = cookie;

        amdgpu_asic_set_vga_state(adev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines the number of bits in the page table versus the
 * page directory; a page is 4KB, so we have 12 bits of offset, a minimum of
 * 9 bits in the page table, and the remaining bits in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (amdgpu_vm_block_size == -1)
                return;

        if (amdgpu_vm_block_size < 9) {
                dev_warn(adev->dev, "VM page table size (%d) too small\n",
                         amdgpu_vm_block_size);
                amdgpu_vm_block_size = -1;
        }
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
        /* no need to check the default value */
        if (amdgpu_vm_size == -1)
                return;

        if (amdgpu_vm_size < 1) {
                dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
                         amdgpu_vm_size);
                amdgpu_vm_size = -1;
        }
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
        struct sysinfo si;
        bool is_os_64 = (sizeof(void *) == 8) ? true : false;
        uint64_t total_memory;
        uint64_t dram_size_seven_GB = 0x1B8000000;
        uint64_t dram_size_three_GB = 0xB8000000;

        if (amdgpu_smu_memory_pool_size == 0)
                return;

        if (!is_os_64) {
                DRM_WARN("Not 64-bit OS, feature not supported\n");
                goto def_value;
        }
        si_meminfo(&si);
        total_memory = (uint64_t)si.totalram * si.mem_unit;

        if ((amdgpu_smu_memory_pool_size == 1) ||
            (amdgpu_smu_memory_pool_size == 2)) {
                if (total_memory < dram_size_three_GB)
                        goto def_value1;
        } else if ((amdgpu_smu_memory_pool_size == 4) ||
                   (amdgpu_smu_memory_pool_size == 8)) {
                if (total_memory < dram_size_seven_GB)
                        goto def_value1;
        } else {
                DRM_WARN("Smu memory pool size not supported\n");
                goto def_value;
        }
        adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

        return;

def_value1:
        DRM_WARN("Not enough system memory\n");
def_value:
        adev->pm.smu_prv_buffer_size = 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = 4;
        } else if (!is_power_of_2(amdgpu_sched_jobs)) {
                dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
                         amdgpu_sched_jobs);
                amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
        }

        if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
                /* gart size must be greater or equal to 32M */
                dev_warn(adev->dev, "gart size (%d) too small\n",
                         amdgpu_gart_size);
                amdgpu_gart_size = -1;
        }

        if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
                /* gtt size must be greater or equal to 32M */
                dev_warn(adev->dev, "gtt size (%d) too small\n",
                         amdgpu_gtt_size);
                amdgpu_gtt_size = -1;
        }

        /* valid range is between 4 and 9 inclusive */
        if (amdgpu_vm_fragment_size != -1 &&
            (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
                dev_warn(adev->dev, "valid range is between 4 and 9\n");
                amdgpu_vm_fragment_size = -1;
        }

        amdgpu_device_check_smu_prv_buffer_size(adev);

        amdgpu_device_check_vm_size(adev);

        amdgpu_device_check_block_size(adev);

        if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
            !is_power_of_2(amdgpu_vram_page_split))) {
                dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
                         amdgpu_vram_page_split);
                amdgpu_vram_page_split = 1024;
        }

        if (amdgpu_lockup_timeout == 0) {
                dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
                amdgpu_lockup_timeout = 10000;
        }

        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
}

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after they are powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
                return;

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("amdgpu: switched on\n");
                /* don't suspend or resume card normally */
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

                amdgpu_device_resume(dev, true, true);

                dev->switch_power_state = DRM_SWITCH_POWER_ON;
                drm_kms_helper_poll_enable(dev);
        } else {
                pr_info("amdgpu: switched off\n");
                drm_kms_helper_poll_disable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                amdgpu_device_suspend(dev, true, true);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
        .set_gpu_state = amdgpu_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
                                           enum amd_ip_block_type block_type,
                                           enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = dev;
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type != block_type)
                        continue;
                if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
                        continue;
                r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
                        (void *)adev, state);
                if (r)
                        DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
        }
        return r;
}

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
                                           enum amd_ip_block_type block_type,
                                           enum amd_powergating_state state)
{
        struct amdgpu_device *adev = dev;
        int i, r = 0;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type != block_type)
                        continue;
                if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
                        continue;
                r = adev->ip_blocks[i].version->funcs->set_powergating_state(
                        (void *)adev, state);
                if (r)
                        DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
                                  adev->ip_blocks[i].version->funcs->name, r);
        }
        return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
                                            u32 *flags)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
                        adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
        }
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
                                   enum amd_ip_block_type block_type)
{
        int i, r;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type == block_type) {
                        r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
                        if (r)
                                return r;
                        break;
                }
        }
        return 0;
}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
                              enum amd_ip_block_type block_type)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                if (adev->ip_blocks[i].version->type == block_type)
                        return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
        }
        return true;
}

/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
                              enum amd_ip_block_type type)
{
        int i;

        for (i = 0; i < adev->num_ip_blocks; i++)
                if (adev->ip_blocks[i].version->type == type)
                        return &adev->ip_blocks[i];

        return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
                                       enum amd_ip_block_type type,
                                       u32 major, u32 minor)
{
        struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

        if (ip_block && ((ip_block->version->major > major) ||
                        ((ip_block->version->major == major) &&
                        (ip_block->version->minor >= minor))))
                return 0;

        return 1;
}

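/*
 * Illustrative sketch: callers typically use the compare helper to gate
 * behavior on a minimum IP version, e.g. (the version numbers here are
 * hypothetical):
 *
 *	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       8, 0) == 0) {
 *		// GFX IP is version 8.0 or newer
 *	}
 */
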
/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
                               const struct amdgpu_ip_block_version *ip_block_version)
{
        if (!ip_block_version)
                return -EINVAL;

        DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
                 ip_block_version->funcs->name);

        adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

        return 0;
}

/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display. This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
        adev->enable_virtual_display = false;

        if (amdgpu_virtual_display) {
                struct drm_device *ddev = adev->ddev;
                const char *pci_address_name = pci_name(ddev->pdev);
                char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

                pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
                pciaddstr_tmp = pciaddstr;
                while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
                        pciaddname = strsep(&pciaddname_tmp, ",");
                        if (!strcmp("all", pciaddname)
                            || !strcmp(pci_address_name, pciaddname)) {
                                long num_crtc;
                                int res = -1;

                                adev->enable_virtual_display = true;

                                if (pciaddname_tmp)
                                        res = kstrtol(pciaddname_tmp, 10,
                                                      &num_crtc);

                                if (!res) {
                                        if (num_crtc < 1)
                                                num_crtc = 1;
                                        if (num_crtc > 6)
                                                num_crtc = 6;
                                        adev->mode_info.num_crtc = num_crtc;
                                } else {
                                        adev->mode_info.num_crtc = 1;
                                }
                                break;
                        }
                }

                DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
                         amdgpu_virtual_display, pci_address_name,
                         adev->enable_virtual_display, adev->mode_info.num_crtc);

                kfree(pciaddstr);
        }
}

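/*
 * Illustrative note on the parameter format parsed above (the addresses
 * here are hypothetical): entries are separated by ';', each entry is a
 * PCI address or "all", optionally followed by ',' and a crtc count
 * (clamped to 1..6), e.g.:
 *
 *	modprobe amdgpu virtual_display=0000:26:00.0,2;0000:09:00.0
 */
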
/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them available to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err;
        const struct gpu_info_firmware_header_v1_0 *hdr;

        adev->firmware.gpu_info_fw = NULL;

        switch (adev->asic_type) {
        case CHIP_TOPAZ:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
        case CHIP_POLARIS12:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
        case CHIP_VERDE:
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_OLAND:
        case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
        case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
#endif
        default:
                return 0;
        case CHIP_VEGA10:
                chip_name = "vega10";
                break;
        case CHIP_VEGA12:
                chip_name = "vega12";
                break;
        case CHIP_RAVEN:
                chip_name = "raven";
                break;
        }

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
        err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
        if (err) {
                dev_err(adev->dev,
                        "Failed to load gpu_info firmware \"%s\"\n",
                        fw_name);
                goto out;
        }
        err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
        if (err) {
                dev_err(adev->dev,
                        "Failed to validate gpu_info firmware \"%s\"\n",
                        fw_name);
                goto out;
        }

        hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
        amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

        switch (hdr->version_major) {
        case 1:
        {
                const struct gpu_info_firmware_v1_0 *gpu_info_fw =
                        (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
                                                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));

                adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
                adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
                adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
                adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
                adev->gfx.config.max_texture_channel_caches =
                        le32_to_cpu(gpu_info_fw->gc_num_tccs);
                adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
                adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
                adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
                adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
                adev->gfx.config.double_offchip_lds_buf =
                        le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
                adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
                adev->gfx.cu_info.max_waves_per_simd =
                        le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
                adev->gfx.cu_info.max_scratch_slots_per_cu =
                        le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
                adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
                break;
1446 }
1447 default:
1448 dev_err(adev->dev,
1449 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1450 err = -EINVAL;
1451 goto out;
1452 }
1453out:
Alex Deuchere2a75f82017-04-27 16:58:01 -04001454 return err;
1455}
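
/*
 * A minimal sketch (illustrative only, kept out of the build) of the blob
 * layout walked above: the gpu_info payload sits behind the common ucode
 * header, so a field is reached by offsetting into the firmware data and
 * converting from the little-endian on-disk format. The helper name is
 * made up for illustration.
 */
#if 0
static u32 example_gpu_info_num_se(const struct firmware *fw)
{
	const struct gpu_info_firmware_header_v1_0 *hdr =
		(const struct gpu_info_firmware_header_v1_0 *)fw->data;
	const struct gpu_info_firmware_v1_0 *gpu_info =
		(const struct gpu_info_firmware_v1_0 *)(fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	return le32_to_cpu(gpu_info->gc_num_se);
}
#endif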
1456
Alex Deuchere3ecdff2018-03-15 17:39:45 -05001457/**
1458 * amdgpu_device_ip_early_init - run early init for hardware IPs
1459 *
1460 * @adev: amdgpu_device pointer
1461 *
1462 * Early initialization pass for hardware IPs. The hardware IPs that make
1463 * up each asic are discovered and each IP's early_init callback is run. This
1464 * is the first stage in initializing the asic.
1465 * Returns 0 on success, negative error code on failure.
1466 */
Alex Deucher06ec9072017-12-14 15:02:39 -05001467static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001468{
Alex Deucheraaa36a92015-04-20 17:31:14 -04001469 int i, r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001470
Alex Deucher483ef982016-09-30 12:43:04 -04001471 amdgpu_device_enable_virtual_display(adev);
Emily Denga6be7572016-08-08 11:37:50 +08001472
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001473 switch (adev->asic_type) {
Alex Deucheraaa36a92015-04-20 17:31:14 -04001474 case CHIP_TOPAZ:
1475 case CHIP_TONGA:
David Zhang48299f92015-07-08 01:05:16 +08001476 case CHIP_FIJI:
Flora Cui2cc0c0b2016-03-14 18:33:29 -04001477 case CHIP_POLARIS11:
1478 case CHIP_POLARIS10:
Junwei Zhangc4642a42016-12-14 15:32:28 -05001479 case CHIP_POLARIS12:
Alex Deucheraaa36a92015-04-20 17:31:14 -04001480 case CHIP_CARRIZO:
Samuel Li39bb0c92015-10-08 16:31:43 -04001481 case CHIP_STONEY:
1482 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
Alex Deucheraaa36a92015-04-20 17:31:14 -04001483 adev->family = AMDGPU_FAMILY_CZ;
1484 else
1485 adev->family = AMDGPU_FAMILY_VI;
1486
1487 r = vi_set_ip_blocks(adev);
1488 if (r)
1489 return r;
1490 break;
Ken Wang33f34802016-01-21 17:29:41 +08001491#ifdef CONFIG_DRM_AMDGPU_SI
1492 case CHIP_VERDE:
1493 case CHIP_TAHITI:
1494 case CHIP_PITCAIRN:
1495 case CHIP_OLAND:
1496 case CHIP_HAINAN:
Ken Wang295d0da2016-05-24 21:02:53 +08001497 adev->family = AMDGPU_FAMILY_SI;
Ken Wang33f34802016-01-21 17:29:41 +08001498 r = si_set_ip_blocks(adev);
1499 if (r)
1500 return r;
1501 break;
1502#endif
Alex Deuchera2e73f52015-04-20 17:09:27 -04001503#ifdef CONFIG_DRM_AMDGPU_CIK
1504 case CHIP_BONAIRE:
1505 case CHIP_HAWAII:
1506 case CHIP_KAVERI:
1507 case CHIP_KABINI:
1508 case CHIP_MULLINS:
1509 if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
1510 adev->family = AMDGPU_FAMILY_CI;
1511 else
1512 adev->family = AMDGPU_FAMILY_KV;
1513
1514 r = cik_set_ip_blocks(adev);
1515 if (r)
1516 return r;
1517 break;
1518#endif
Alex Deuchere48a3cd2017-09-01 16:22:35 -04001519 case CHIP_VEGA10:
1520 case CHIP_VEGA12:
1521 case CHIP_RAVEN:
Chunming Zhou2ca8a5d2016-12-07 17:31:19 +08001522 if (adev->asic_type == CHIP_RAVEN)
1523 adev->family = AMDGPU_FAMILY_RV;
1524 else
1525 adev->family = AMDGPU_FAMILY_AI;
Ken Wang460826e2017-03-06 14:53:16 -05001526
1527 r = soc15_set_ip_blocks(adev);
1528 if (r)
1529 return r;
1530 break;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001531 default:
1532 /* FIXME: not supported yet */
1533 return -EINVAL;
1534 }
1535
Alex Deuchere2a75f82017-04-27 16:58:01 -04001536 r = amdgpu_device_parse_gpu_info_fw(adev);
1537 if (r)
1538 return r;
1539
pding18847342017-11-06 10:21:26 +08001540 amdgpu_amdkfd_device_probe(adev);
1541
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001542 if (amdgpu_sriov_vf(adev)) {
1543 r = amdgpu_virt_request_full_gpu(adev, true);
1544 if (r)
pding5ffa61c2017-10-30 14:07:24 +08001545 return -EAGAIN;
Xiangliang Yu3149d9d2017-01-12 15:14:36 +08001546 }
1547
Huang Rui00f54b92018-02-27 21:53:00 +08001548 adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
1549
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001550 for (i = 0; i < adev->num_ip_blocks; i++) {
1551 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
Huang Ruied8cf002017-05-03 09:40:17 +08001552 DRM_ERROR("disabled ip block: %d <%s>\n",
1553 i, adev->ip_blocks[i].version->funcs->name);
Alex Deuchera1255102016-10-13 17:41:13 -04001554 adev->ip_blocks[i].status.valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001555 } else {
Alex Deuchera1255102016-10-13 17:41:13 -04001556 if (adev->ip_blocks[i].version->funcs->early_init) {
1557 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001558 if (r == -ENOENT) {
Alex Deuchera1255102016-10-13 17:41:13 -04001559 adev->ip_blocks[i].status.valid = false;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001560 } else if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001561 DRM_ERROR("early_init of IP block <%s> failed %d\n",
1562 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001563 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001564 } else {
Alex Deuchera1255102016-10-13 17:41:13 -04001565 adev->ip_blocks[i].status.valid = true;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001566 }
Alex Deucher974e6b62015-07-10 13:59:44 -04001567 } else {
Alex Deuchera1255102016-10-13 17:41:13 -04001568 adev->ip_blocks[i].status.valid = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001569 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001570 }
1571 }
1572
Nicolai Hähnle395d1fb2016-06-02 12:32:07 +02001573 adev->cg_flags &= amdgpu_cg_mask;
1574 adev->pg_flags &= amdgpu_pg_mask;
1575
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001576 return 0;
1577}
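
/*
 * A minimal sketch (illustrative only, kept out of the build) of the
 * ip_block_mask test in the loop above: the module parameter is a plain
 * bitmask indexed by IP block position, so clearing bit i marks block i
 * invalid before any of its callbacks run. The helper name is made up.
 */
#if 0
static bool example_ip_block_enabled(uint32_t ip_block_mask, int i)
{
	return (ip_block_mask & (1 << i)) != 0;
}
#endif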
1578
Alex Deuchere3ecdff2018-03-15 17:39:45 -05001579/**
1580 * amdgpu_device_ip_init - run init for hardware IPs
1581 *
1582 * @adev: amdgpu_device pointer
1583 *
1584 * Main initialization pass for hardware IPs. The list of all the hardware
1585 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
1586 * are run. sw_init initializes the software state associated with each IP
1587 * and hw_init initializes the hardware associated with each IP.
1588 * Returns 0 on success, negative error code on failure.
1589 */
Alex Deucher06ec9072017-12-14 15:02:39 -05001590static int amdgpu_device_ip_init(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001591{
1592 int i, r;
1593
1594 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001595 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001596 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001597 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001598 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001599 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
1600 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001601 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001602 }
Alex Deuchera1255102016-10-13 17:41:13 -04001603 adev->ip_blocks[i].status.sw = true;
Shaoyun Liubfca0282018-02-01 17:37:50 -05001604
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001605 /* need to do gmc hw init early so we can allocate gpu mem */
Alex Deuchera1255102016-10-13 17:41:13 -04001606 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
Alex Deucher06ec9072017-12-14 15:02:39 -05001607 r = amdgpu_device_vram_scratch_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001608 if (r) {
1609 DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001610 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001611 }
Alex Deuchera1255102016-10-13 17:41:13 -04001612 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001613 if (r) {
1614 DRM_ERROR("hw_init %d failed %d\n", i, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001615 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001616 }
Alex Deucher06ec9072017-12-14 15:02:39 -05001617 r = amdgpu_device_wb_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001618 if (r) {
Alex Deucher06ec9072017-12-14 15:02:39 -05001619 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001620 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001621 }
Alex Deuchera1255102016-10-13 17:41:13 -04001622 adev->ip_blocks[i].status.hw = true;
Monk Liu24936642017-01-09 15:54:32 +08001623
1624 /* right after GMC hw init, we create CSA */
1625 if (amdgpu_sriov_vf(adev)) {
1626 r = amdgpu_allocate_static_csa(adev);
1627 if (r) {
1628 DRM_ERROR("allocate CSA failed %d\n", r);
1629 return r;
1630 }
1631 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001632 }
1633 }
1634
1635 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001636 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001637 continue;
Shaoyun Liubfca0282018-02-01 17:37:50 -05001638 if (adev->ip_blocks[i].status.hw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001639 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001640 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001641 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001642 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1643 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001644 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001645 }
Alex Deuchera1255102016-10-13 17:41:13 -04001646 adev->ip_blocks[i].status.hw = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001647 }
1648
pding18847342017-11-06 10:21:26 +08001649 amdgpu_amdkfd_device_init(adev);
pdingc6332b92017-11-06 11:21:55 +08001650
1651 if (amdgpu_sriov_vf(adev))
1652 amdgpu_virt_release_full_gpu(adev, true);
1653
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001654 return 0;
1655}
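
/*
 * A minimal sketch (illustrative only, kept out of the build) of the
 * ordering rule encoded above: the GMC block gets its hw_init during the
 * sw_init pass because later hw_init callbacks (and the wb/vram-scratch
 * helpers) allocate GPU memory, which needs a working memory controller.
 * The helper name is made up.
 */
#if 0
static bool example_gmc_ready(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			return adev->ip_blocks[i].status.hw;
	return false;
}
#endif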
1656
Alex Deuchere3ecdff2018-03-15 17:39:45 -05001657/**
1658 * amdgpu_device_fill_reset_magic - saves the reset magic from the GART table
1659 *
1660 * @adev: amdgpu_device pointer
1661 *
1662 * Saves a reset magic value from the start of the GART table in VRAM. The
1663 * driver calls this function before a GPU reset. If the value is retained
1664 * after a GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
1665 */
Alex Deucher06ec9072017-12-14 15:02:39 -05001666static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08001667{
1668 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
1669}
1670
Alex Deuchere3ecdff2018-03-15 17:39:45 -05001671/**
1672 * amdgpu_device_check_vram_lost - check if vram is valid
1673 *
1674 * @adev: amdgpu_device pointer
1675 *
1676 * Checks the reset magic value saved from the GART table in VRAM.
1677 * The driver calls this after a GPU reset to see if the contents of
1678 * VRAM have been lost or not.
1679 * Returns true if vram is lost, false if not.
1680 */
Alex Deucher06ec9072017-12-14 15:02:39 -05001681static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08001682{
1683 return !!memcmp(adev->gart.ptr, adev->reset_magic,
1684 AMDGPU_RESET_MAGIC_NUM);
1685}
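
/*
 * A minimal sketch (illustrative only, kept out of the build) of how the
 * two helpers above pair up around a GPU reset: the magic is saved before
 * the reset and compared afterwards to decide whether buffers in VRAM
 * need to be restored from their shadows. The helper name is made up.
 */
#if 0
static bool example_vram_lost_after_reset(struct amdgpu_device *adev)
{
	amdgpu_device_fill_reset_magic(adev);	/* before the reset */
	/* ... the ASIC reset would happen here ... */
	return amdgpu_device_check_vram_lost(adev);
}
#endif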
1686
Alex Deuchere3ecdff2018-03-15 17:39:45 -05001687/**
1688 * amdgpu_device_ip_late_set_cg_state - late init for clockgating
1689 *
1690 * @adev: amdgpu_device pointer
1691 *
1692 * Late initialization pass enabling clockgating for hardware IPs.
1693 * The list of all the hardware IPs that make up the asic is walked and the
1694 * set_clockgating_state callbacks are run. This stage is run late
1695 * in the init process.
1696 * Returns 0 on success, negative error code on failure.
1697 */
Alex Deucher06ec9072017-12-14 15:02:39 -05001698static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
Shirish S2dc80b02017-05-25 10:05:25 +05301699{
1700 int i = 0, r;
1701
Shaoyun Liu4a2ba392018-02-05 16:41:33 -05001702 if (amdgpu_emu_mode == 1)
1703 return 0;
1704
Shirish S2c773de2018-04-16 12:17:57 +05301705 r = amdgpu_ib_ring_tests(adev);
1706 if (r)
1707 DRM_ERROR("ib ring test failed (%d).\n", r);
1708
Shirish S2dc80b02017-05-25 10:05:25 +05301709 for (i = 0; i < adev->num_ip_blocks; i++) {
1710 if (!adev->ip_blocks[i].status.valid)
1711 continue;
1712 /* skip CG for VCE/UVD, it's handled specially */
1713 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
Rex Zhu57716322018-03-12 19:50:38 +08001714 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1715 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
Shirish S2dc80b02017-05-25 10:05:25 +05301716 /* enable clockgating to save power */
1717 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1718 AMD_CG_STATE_GATE);
1719 if (r) {
1720 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
1721 adev->ip_blocks[i].version->funcs->name, r);
1722 return r;
1723 }
1724 }
1725 }
1726 return 0;
1727}
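
/*
 * A minimal sketch (illustrative only, kept out of the build): besides
 * the bulk loop above, a single block type can be gated or ungated
 * directly, as the suspend path below does for the SMC block. The helper
 * name is made up.
 */
#if 0
static void example_ungate_smc(struct amdgpu_device *adev)
{
	int r = amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_SMC,
						       AMD_CG_STATE_UNGATE);

	if (r)
		DRM_ERROR("ungating SMC failed (%d)\n", r);
}
#endif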
1728
Alex Deuchere3ecdff2018-03-15 17:39:45 -05001729/**
1730 * amdgpu_device_ip_late_init - run late init for hardware IPs
1731 *
1732 * @adev: amdgpu_device pointer
1733 *
1734 * Late initialization pass for hardware IPs. The list of all the hardware
1735 * IPs that make up the asic is walked and the late_init callbacks are run.
1736 * late_init covers any special initialization that an IP requires
1737 * after all of the IPs have been initialized or something that needs to happen
1738 * late in the init process.
1739 * Returns 0 on success, negative error code on failure.
1740 */
Alex Deucher06ec9072017-12-14 15:02:39 -05001741static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001742{
1743 int i = 0, r;
1744
1745 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001746 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001747 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001748 if (adev->ip_blocks[i].version->funcs->late_init) {
1749 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001750 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001751 DRM_ERROR("late_init of IP block <%s> failed %d\n",
1752 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001753 return r;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001754 }
Alex Deuchera1255102016-10-13 17:41:13 -04001755 adev->ip_blocks[i].status.late_initialized = true;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001756 }
1757 }
1758
Shirish S2c773de2018-04-16 12:17:57 +05301759 queue_delayed_work(system_wq, &adev->late_init_work,
1760 msecs_to_jiffies(AMDGPU_RESUME_MS));
Shirish S2dc80b02017-05-25 10:05:25 +05301761
Alex Deucher06ec9072017-12-14 15:02:39 -05001762 amdgpu_device_fill_reset_magic(adev);
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08001763
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001764 return 0;
1765}
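
/*
 * A minimal sketch (illustrative only, kept out of the build) of the
 * deferral done above: late_init_work fires AMDGPU_RESUME_MS after late
 * init and runs amdgpu_device_ip_late_init_func_handler() below; teardown
 * must cancel it with cancel_delayed_work_sync(), as amdgpu_device_fini()
 * does. The helper name is made up.
 */
#if 0
static void example_schedule_late_work(struct amdgpu_device *adev)
{
	queue_delayed_work(system_wq, &adev->late_init_work,
			   msecs_to_jiffies(AMDGPU_RESUME_MS));
}
#endif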
1766
Alex Deuchere3ecdff2018-03-15 17:39:45 -05001767/**
1768 * amdgpu_device_ip_fini - run fini for hardware IPs
1769 *
1770 * @adev: amdgpu_device pointer
1771 *
1772 * Main teardown pass for hardware IPs. The list of all the hardware
1773 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
1774 * are run. hw_fini tears down the hardware associated with each IP
1775 * and sw_fini tears down any software state associated with each IP.
1776 * Returns 0 on success, negative error code on failure.
1777 */
Alex Deucher06ec9072017-12-14 15:02:39 -05001778static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001779{
1780 int i, r;
1781
pding18847342017-11-06 10:21:26 +08001782 amdgpu_amdkfd_device_fini(adev);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001783 /* need to disable SMC first */
1784 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04001785 if (!adev->ip_blocks[i].status.hw)
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001786 continue;
Rex Zhu57716322018-03-12 19:50:38 +08001787 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC &&
1788 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001789 /* ungate blocks before hw fini so that we can shut down the blocks safely */
Alex Deuchera1255102016-10-13 17:41:13 -04001790 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1791 AMD_CG_STATE_UNGATE);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001792 if (r) {
1793 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001794 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001795 return r;
1796 }
Alex Deuchera1255102016-10-13 17:41:13 -04001797 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001798 /* XXX handle errors */
1799 if (r) {
1800 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
Alex Deuchera1255102016-10-13 17:41:13 -04001801 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001802 }
Alex Deuchera1255102016-10-13 17:41:13 -04001803 adev->ip_blocks[i].status.hw = false;
Alex Deucher3e96dbf2016-10-13 11:22:17 -04001804 break;
1805 }
1806 }
1807
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001808 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001809 if (!adev->ip_blocks[i].status.hw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001810 continue;
Rex Zhu8201a672016-11-24 21:44:44 +08001811
1812 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
Rex Zhu81ce8be2018-03-20 16:28:56 +08001813 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
1814 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
Rex Zhu8201a672016-11-24 21:44:44 +08001815 /* ungate blocks before hw fini so that we can shut down the blocks safely */
1816 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1817 AMD_CG_STATE_UNGATE);
1818 if (r) {
1819 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1820 adev->ip_blocks[i].version->funcs->name, r);
1821 return r;
1822 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001823 }
Rex Zhu8201a672016-11-24 21:44:44 +08001824
Alex Deuchera1255102016-10-13 17:41:13 -04001825 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001826 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001827 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001828 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
1829 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001830 }
Rex Zhu8201a672016-11-24 21:44:44 +08001831
Alex Deuchera1255102016-10-13 17:41:13 -04001832 adev->ip_blocks[i].status.hw = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001833 }
1834
Alex Deucher9950cda2018-01-18 19:05:36 -05001835
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001836 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001837 if (!adev->ip_blocks[i].status.sw)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001838 continue;
Monk Liuc12aba32018-01-24 12:20:32 +08001839
1840 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1841 amdgpu_free_static_csa(adev);
1842 amdgpu_device_wb_fini(adev);
1843 amdgpu_device_vram_scratch_fini(adev);
1844 }
1845
Alex Deuchera1255102016-10-13 17:41:13 -04001846 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001847 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001848 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001849 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
1850 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001851 }
Alex Deuchera1255102016-10-13 17:41:13 -04001852 adev->ip_blocks[i].status.sw = false;
1853 adev->ip_blocks[i].status.valid = false;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001854 }
1855
Monk Liua6dcfd92016-05-19 14:36:34 +08001856 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001857 if (!adev->ip_blocks[i].status.late_initialized)
Grazvydas Ignotas8a2eef12016-10-03 00:06:44 +03001858 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04001859 if (adev->ip_blocks[i].version->funcs->late_fini)
1860 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
1861 adev->ip_blocks[i].status.late_initialized = false;
Monk Liua6dcfd92016-05-19 14:36:34 +08001862 }
1863
Monk Liu030308f2017-09-15 15:34:52 +08001864 if (amdgpu_sriov_vf(adev))
Monk Liu24136132017-11-14 16:56:55 +08001865 if (amdgpu_virt_release_full_gpu(adev, false))
1866 DRM_ERROR("failed to release exclusive mode on fini\n");
Monk Liu24936642017-01-09 15:54:32 +08001867
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001868 return 0;
1869}
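
/*
 * A minimal sketch (illustrative only, kept out of the build) of the
 * teardown rule used above: the fini passes walk adev->ip_blocks in
 * reverse so blocks are shut down in the opposite order of their
 * bring-up. The helper name is made up.
 */
#if 0
static void example_print_fini_order(struct amdgpu_device *adev)
{
	int i;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--)
		DRM_INFO("fini %d: %s\n", i,
			 adev->ip_blocks[i].version->funcs->name);
}
#endif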
1870
Alex Deuchere3ecdff2018-03-15 17:39:45 -05001871/**
1872 * amdgpu_device_ip_late_init_func_handler - work handler for clockgating
1873 *
1874 * @work: work_struct
1875 *
1876 * Work handler for amdgpu_device_ip_late_set_cg_state. We put the
1877 * clockgating setup into a worker thread to speed up driver init and
1878 * resume from suspend.
1879 */
Alex Deucher06ec9072017-12-14 15:02:39 -05001880static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
Shirish S2dc80b02017-05-25 10:05:25 +05301881{
1882 struct amdgpu_device *adev =
1883 container_of(work, struct amdgpu_device, late_init_work.work);
Alex Deucher06ec9072017-12-14 15:02:39 -05001884 amdgpu_device_ip_late_set_cg_state(adev);
Shirish S2dc80b02017-05-25 10:05:25 +05301885}
1886
Alex Deuchere3ecdff2018-03-15 17:39:45 -05001887/**
1888 * amdgpu_device_ip_suspend - run suspend for hardware IPs
1889 *
1890 * @adev: amdgpu_device pointer
1891 *
1892 * Main suspend function for hardware IPs. The list of all the hardware
1893 * IPs that make up the asic is walked, clockgating is disabled and the
1894 * suspend callbacks are run. suspend puts the hardware and software state
1895 * in each IP into a state suitable for suspend.
1896 * Returns 0 on success, negative error code on failure.
1897 */
Alex Deuchercdd61df2017-12-14 16:47:40 -05001898int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001899{
1900 int i, r;
1901
Xiangliang Yue941ea92017-01-18 12:47:55 +08001902 if (amdgpu_sriov_vf(adev))
1903 amdgpu_virt_request_full_gpu(adev, false);
1904
Flora Cuic5a93a22016-02-26 10:45:25 +08001905 /* ungate SMC block first */
Alex Deucher2990a1f2017-12-15 16:18:00 -05001906 r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
1907 AMD_CG_STATE_UNGATE);
Flora Cuic5a93a22016-02-26 10:45:25 +08001908 if (r) {
Alex Deucher2990a1f2017-12-15 16:18:00 -05001909 DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
Flora Cuic5a93a22016-02-26 10:45:25 +08001910 }
1911
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001912 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
Alex Deuchera1255102016-10-13 17:41:13 -04001913 if (!adev->ip_blocks[i].status.valid)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001914 continue;
1915 /* ungate blocks so that suspend can properly shut them down */
Rex Zhu5b2a3d22018-03-14 15:38:48 +08001916 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
Rex Zhu57716322018-03-12 19:50:38 +08001917 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
Alex Deuchera1255102016-10-13 17:41:13 -04001918 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
1919 AMD_CG_STATE_UNGATE);
Flora Cuic5a93a22016-02-26 10:45:25 +08001920 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001921 DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
1922 adev->ip_blocks[i].version->funcs->name, r);
Flora Cuic5a93a22016-02-26 10:45:25 +08001923 }
Alex Deucher2c1a2782015-12-07 17:02:53 -05001924 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001925 /* XXX handle errors */
Alex Deuchera1255102016-10-13 17:41:13 -04001926 r = adev->ip_blocks[i].version->funcs->suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001927 /* XXX handle errors */
Alex Deucher2c1a2782015-12-07 17:02:53 -05001928 if (r) {
Alex Deuchera1255102016-10-13 17:41:13 -04001929 DRM_ERROR("suspend of IP block <%s> failed %d\n",
1930 adev->ip_blocks[i].version->funcs->name, r);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001931 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001932 }
1933
Xiangliang Yue941ea92017-01-18 12:47:55 +08001934 if (amdgpu_sriov_vf(adev))
1935 amdgpu_virt_release_full_gpu(adev, false);
1936
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001937 return 0;
1938}
1939
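/**
 * amdgpu_device_ip_reinit_early_sriov - reinit early IPs for SR-IOV recovery
 *
 * @adev: amdgpu_device pointer
 *
 * First pass of SR-IOV re-initialization. Re-runs the hw_init callbacks
 * for the GMC, COMMON and IH blocks in that fixed order when recovering
 * a GPU that is used as a virtual function.
 * Returns 0 on success, negative error code on failure.
 */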
Alex Deucher06ec9072017-12-14 15:02:39 -05001940static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08001941{
1942 int i, r;
1943
Monk Liu2cb681b2017-04-26 12:00:49 +08001944 static enum amd_ip_block_type ip_order[] = {
1945 AMD_IP_BLOCK_TYPE_GMC,
1946 AMD_IP_BLOCK_TYPE_COMMON,
Monk Liu2cb681b2017-04-26 12:00:49 +08001947 AMD_IP_BLOCK_TYPE_IH,
1948 };
Monk Liua90ad3c2017-01-23 14:22:08 +08001949
Monk Liu2cb681b2017-04-26 12:00:49 +08001950 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1951 int j;
1952 struct amdgpu_ip_block *block;
Monk Liua90ad3c2017-01-23 14:22:08 +08001953
Monk Liu2cb681b2017-04-26 12:00:49 +08001954 for (j = 0; j < adev->num_ip_blocks; j++) {
1955 block = &adev->ip_blocks[j];
1956
1957 if (block->version->type != ip_order[i] ||
1958 !block->status.valid)
1959 continue;
1960
1961 r = block->version->funcs->hw_init(adev);
1962 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
Monk Liuc41d1cf2017-12-25 11:59:27 +08001963 if (r)
1964 return r;
Monk Liua90ad3c2017-01-23 14:22:08 +08001965 }
1966 }
1967
1968 return 0;
1969}
1970
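/**
 * amdgpu_device_ip_reinit_late_sriov - reinit remaining IPs for SR-IOV recovery
 *
 * @adev: amdgpu_device pointer
 *
 * Second pass of SR-IOV re-initialization. Re-runs the hw_init callbacks
 * for the SMC, PSP, DCE, GFX, SDMA, UVD and VCE blocks in that fixed
 * order.
 * Returns 0 on success, negative error code on failure.
 */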
Alex Deucher06ec9072017-12-14 15:02:39 -05001971static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08001972{
1973 int i, r;
1974
Monk Liu2cb681b2017-04-26 12:00:49 +08001975 static enum amd_ip_block_type ip_order[] = {
1976 AMD_IP_BLOCK_TYPE_SMC,
Monk Liuef4c1662017-09-22 16:23:34 +08001977 AMD_IP_BLOCK_TYPE_PSP,
Monk Liu2cb681b2017-04-26 12:00:49 +08001978 AMD_IP_BLOCK_TYPE_DCE,
1979 AMD_IP_BLOCK_TYPE_GFX,
1980 AMD_IP_BLOCK_TYPE_SDMA,
Frank Min257deb82017-06-15 20:07:36 +08001981 AMD_IP_BLOCK_TYPE_UVD,
1982 AMD_IP_BLOCK_TYPE_VCE
Monk Liu2cb681b2017-04-26 12:00:49 +08001983 };
Monk Liua90ad3c2017-01-23 14:22:08 +08001984
Monk Liu2cb681b2017-04-26 12:00:49 +08001985 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
1986 int j;
1987 struct amdgpu_ip_block *block;
Monk Liua90ad3c2017-01-23 14:22:08 +08001988
Monk Liu2cb681b2017-04-26 12:00:49 +08001989 for (j = 0; j < adev->num_ip_blocks; j++) {
1990 block = &adev->ip_blocks[j];
1991
1992 if (block->version->type != ip_order[i] ||
1993 !block->status.valid)
1994 continue;
1995
1996 r = block->version->funcs->hw_init(adev);
1997 DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
Monk Liuc41d1cf2017-12-25 11:59:27 +08001998 if (r)
1999 return r;
Monk Liua90ad3c2017-01-23 14:22:08 +08002000 }
2001 }
2002
2003 return 0;
2004}
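
/*
 * A minimal sketch (illustrative only, kept out of the build) of the
 * ordered-walk idiom shared by the two SR-IOV reinit helpers above:
 * bring-up order comes from a fixed table, and each entry is located by
 * a linear search of adev->ip_blocks rather than by its array position.
 * The helper name is made up.
 */
#if 0
static struct amdgpu_ip_block *
example_find_ip_block(struct amdgpu_device *adev, enum amd_ip_block_type type)
{
	int j;

	for (j = 0; j < adev->num_ip_blocks; j++)
		if (adev->ip_blocks[j].version->type == type &&
		    adev->ip_blocks[j].status.valid)
			return &adev->ip_blocks[j];
	return NULL;
}
#endif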
2005
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002006/**
2007 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
2008 *
2009 * @adev: amdgpu_device pointer
2010 *
2011 * First resume function for hardware IPs. The list of all the hardware
2012 * IPs that make up the asic is walked and the resume callbacks are run for
2013 * COMMON, GMC, and IH. resume puts the hardware into a functional state
2014 * after a suspend and updates the software state as necessary. This
2015 * function is also used for restoring the GPU after a GPU reset.
2016 * Returns 0 on success, negative error code on failure.
2017 */
Alex Deucher06ec9072017-12-14 15:02:39 -05002018static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002019{
2020 int i, r;
2021
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002022 for (i = 0; i < adev->num_ip_blocks; i++) {
2023 if (!adev->ip_blocks[i].status.valid)
2024 continue;
Chunming Zhoufcf06492017-05-05 10:33:33 +08002025 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002026 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2027 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
Chunming Zhoufcf06492017-05-05 10:33:33 +08002028 r = adev->ip_blocks[i].version->funcs->resume(adev);
2029 if (r) {
2030 DRM_ERROR("resume of IP block <%s> failed %d\n",
2031 adev->ip_blocks[i].version->funcs->name, r);
2032 return r;
2033 }
2034 }
2035 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002036
Chunming Zhoufcf06492017-05-05 10:33:33 +08002037 return 0;
2038}
2039
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002040/**
2041 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
2042 *
2043 * @adev: amdgpu_device pointer
2044 *
2045 * Second resume function for hardware IPs. The list of all the hardware
2046 * IPs that make up the asic is walked and the resume callbacks are run for
2047 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
2048 * functional state after a suspend and updates the software state as
2049 * necessary. This function is also used for restoring the GPU after a GPU
2050 * reset.
2051 * Returns 0 on success, negative error code on failure.
2052 */
Alex Deucher06ec9072017-12-14 15:02:39 -05002053static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
Chunming Zhoufcf06492017-05-05 10:33:33 +08002054{
2055 int i, r;
2056
2057 for (i = 0; i < adev->num_ip_blocks; i++) {
2058 if (!adev->ip_blocks[i].status.valid)
2059 continue;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002060 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002061 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2062 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002063 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002064 r = adev->ip_blocks[i].version->funcs->resume(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002065 if (r) {
2066 DRM_ERROR("resume of IP block <%s> failed %d\n",
2067 adev->ip_blocks[i].version->funcs->name, r);
2068 return r;
2069 }
2070 }
2071
2072 return 0;
2073}
2074
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002075/**
2076 * amdgpu_device_ip_resume - run resume for hardware IPs
2077 *
2078 * @adev: amdgpu_device pointer
2079 *
2080 * Main resume function for hardware IPs. The hardware IPs
2081 * are split into two resume functions because they are
2082 * are also used in in recovering from a GPU reset and some additional
2083 * steps need to be take between them. In this case (S3/S4) they are
2084 * run sequentially.
2085 * Returns 0 on success, negative error code on failure.
2086 */
Alex Deucher06ec9072017-12-14 15:02:39 -05002087static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002088{
Chunming Zhoufcf06492017-05-05 10:33:33 +08002089 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002090
Alex Deucher06ec9072017-12-14 15:02:39 -05002091 r = amdgpu_device_ip_resume_phase1(adev);
Chunming Zhoufcf06492017-05-05 10:33:33 +08002092 if (r)
2093 return r;
Alex Deucher06ec9072017-12-14 15:02:39 -05002094 r = amdgpu_device_ip_resume_phase2(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002095
Chunming Zhoufcf06492017-05-05 10:33:33 +08002096 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002097}
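
/*
 * A minimal sketch (illustrative only, kept out of the build) of why
 * resume is split into two phases: GPU reset recovery brings up
 * COMMON/GMC/IH first, can then restore VRAM contents if the reset
 * destroyed them, and only afterwards resumes the remaining blocks.
 * The helper name is made up.
 */
#if 0
static int example_post_reset_resume(struct amdgpu_device *adev)
{
	int r = amdgpu_device_ip_resume_phase1(adev);

	if (r)
		return r;
	if (amdgpu_device_check_vram_lost(adev))
		DRM_INFO("VRAM lost, restore buffers before phase 2\n");
	/* ... shadow buffer restore would go here ... */
	return amdgpu_device_ip_resume_phase2(adev);
}
#endif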
2098
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002099/**
2100 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
2101 *
2102 * @adev: amdgpu_device pointer
2103 *
2104 * Query the VBIOS data tables to determine if the board supports SR-IOV.
2105 */
Monk Liu4e99a442016-03-31 13:26:59 +08002106static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
Andres Rodriguez048765a2016-06-11 02:51:32 -04002107{
Monk Liu6867e1b2017-10-16 19:50:44 +08002108 if (amdgpu_sriov_vf(adev)) {
2109 if (adev->is_atom_fw) {
2110 if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
2111 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2112 } else {
2113 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
2114 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
2115 }
2116
2117 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
2118 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002119 }
Andres Rodriguez048765a2016-06-11 02:51:32 -04002120}
2121
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002122/**
2123 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
2124 *
2125 * @asic_type: AMD asic type
2126 *
2127 * Check if there is DC (new modesetting infrastructure) support for an asic.
2128 * returns true if DC has support, false if not.
2129 */
Harry Wentland45622362017-09-12 15:58:20 -04002130bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2131{
2132 switch (asic_type) {
2133#if defined(CONFIG_DRM_AMD_DC)
2134 case CHIP_BONAIRE:
2135 case CHIP_HAWAII:
Alex Deucher0d6fbcc2017-08-10 14:39:48 -04002136 case CHIP_KAVERI:
Alex Deucher367e6682018-01-25 16:53:25 -05002137 case CHIP_KABINI:
2138 case CHIP_MULLINS:
Harry Wentland45622362017-09-12 15:58:20 -04002139 case CHIP_CARRIZO:
2140 case CHIP_STONEY:
2141 case CHIP_POLARIS11:
2142 case CHIP_POLARIS10:
Alex Deucher2c8ad2d2017-06-15 16:20:24 -04002143 case CHIP_POLARIS12:
Harry Wentland45622362017-09-12 15:58:20 -04002144 case CHIP_TONGA:
2145 case CHIP_FIJI:
Harry Wentland42f8ffa2017-09-15 14:07:30 -04002146 case CHIP_VEGA10:
Alex Deucherdca7b402017-09-02 02:05:29 -04002147 case CHIP_VEGA12:
Harry Wentland42f8ffa2017-09-15 14:07:30 -04002148#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
Hawking Zhangfd187852017-03-06 14:01:11 +08002149 case CHIP_RAVEN:
Harry Wentland42f8ffa2017-09-15 14:07:30 -04002150#endif
Hawking Zhangfd187852017-03-06 14:01:11 +08002151 return amdgpu_dc != 0;
2152#endif
Harry Wentland45622362017-09-12 15:58:20 -04002153 default:
2154 return false;
2155 }
2156}
2157
2158/**
2159 * amdgpu_device_has_dc_support - check if dc is supported
2160 *
2161 * @adev: amdgpu_device pointer
2162 *
2163 * Returns true for supported, false for not supported
2164 */
2165bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
2166{
Xiangliang Yu2555039d2017-01-10 17:34:52 +08002167 if (amdgpu_sriov_vf(adev))
2168 return false;
2169
Harry Wentland45622362017-09-12 15:58:20 -04002170 return amdgpu_device_asic_has_dc_support(adev->asic_type);
2171}
2172
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002173/**
2174 * amdgpu_device_init - initialize the driver
2175 *
2176 * @adev: amdgpu_device pointer
2177 * @ddev: drm dev pointer
2178 * @pdev: pci dev pointer
2179 * @flags: driver flags
2180 *
2181 * Initializes the driver info and hw (all asics).
2182 * Returns 0 for success or an error on failure.
2183 * Called at driver startup.
2184 */
2185int amdgpu_device_init(struct amdgpu_device *adev,
2186 struct drm_device *ddev,
2187 struct pci_dev *pdev,
2188 uint32_t flags)
2189{
2190 int r, i;
2191 bool runtime = false;
Marek Olšák95844d22016-08-17 23:49:27 +02002192 u32 max_MBps;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002193
2194 adev->shutdown = false;
2195 adev->dev = &pdev->dev;
2196 adev->ddev = ddev;
2197 adev->pdev = pdev;
2198 adev->flags = flags;
Jammy Zhou2f7d10b2015-07-22 11:29:01 +08002199 adev->asic_type = flags & AMD_ASIC_MASK;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002200 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
Shaoyun Liu593aa2d2018-02-07 14:43:13 -05002201 if (amdgpu_emu_mode == 1)
2202 adev->usec_timeout *= 2;
Christian König770d13b2018-01-12 14:52:22 +01002203 adev->gmc.gart_size = 512 * 1024 * 1024;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002204 adev->accel_working = false;
2205 adev->num_rings = 0;
2206 adev->mman.buffer_funcs = NULL;
2207 adev->mman.buffer_funcs_ring = NULL;
2208 adev->vm_manager.vm_pte_funcs = NULL;
Christian König2d55e452016-02-08 17:37:38 +01002209 adev->vm_manager.vm_pte_num_rings = 0;
Christian König132f34e2018-01-12 15:26:08 +01002210 adev->gmc.gmc_funcs = NULL;
Chris Wilsonf54d1862016-10-25 13:00:45 +01002211 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
Andres Rodriguezb8866c22017-04-28 20:05:51 -04002212 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002213
2214 adev->smc_rreg = &amdgpu_invalid_rreg;
2215 adev->smc_wreg = &amdgpu_invalid_wreg;
2216 adev->pcie_rreg = &amdgpu_invalid_rreg;
2217 adev->pcie_wreg = &amdgpu_invalid_wreg;
Huang Rui36b9a952016-08-31 13:23:25 +08002218 adev->pciep_rreg = &amdgpu_invalid_rreg;
2219 adev->pciep_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002220 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
2221 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
2222 adev->didt_rreg = &amdgpu_invalid_rreg;
2223 adev->didt_wreg = &amdgpu_invalid_wreg;
Rex Zhuccdbb202016-06-08 12:47:41 +08002224 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
2225 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002226 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
2227 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
2228
Alex Deucher3e39ab92015-06-05 15:04:33 -04002229 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
2230 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
2231 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002232
2233 /* mutex initialization is all done here so we
2234 * can recall functions without locking issues */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002235 atomic_set(&adev->irq.ih.lock, 0);
Huang Rui0e5ca0d2017-03-03 18:37:23 -05002236 mutex_init(&adev->firmware.mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002237 mutex_init(&adev->pm.mutex);
2238 mutex_init(&adev->gfx.gpu_clock_mutex);
2239 mutex_init(&adev->srbm_mutex);
Andres Rodriguezb8866c22017-04-28 20:05:51 -04002240 mutex_init(&adev->gfx.pipe_reserve_mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002241 mutex_init(&adev->grbm_idx_mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002242 mutex_init(&adev->mn_lock);
Alex Deuchere23b74a2017-09-28 09:47:32 -04002243 mutex_init(&adev->virt.vf_errors.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002244 hash_init(adev->mn_hash);
Monk Liu13a752e2017-10-17 15:11:12 +08002245 mutex_init(&adev->lock_reset);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002246
Alex Deucher06ec9072017-12-14 15:02:39 -05002247 amdgpu_device_check_arguments(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002248
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002249 spin_lock_init(&adev->mmio_idx_lock);
2250 spin_lock_init(&adev->smc_idx_lock);
2251 spin_lock_init(&adev->pcie_idx_lock);
2252 spin_lock_init(&adev->uvd_ctx_idx_lock);
2253 spin_lock_init(&adev->didt_idx_lock);
Rex Zhuccdbb202016-06-08 12:47:41 +08002254 spin_lock_init(&adev->gc_cac_idx_lock);
Evan Quan16abb5d2017-07-04 09:21:50 +08002255 spin_lock_init(&adev->se_cac_idx_lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002256 spin_lock_init(&adev->audio_endpt_idx_lock);
Marek Olšák95844d22016-08-17 23:49:27 +02002257 spin_lock_init(&adev->mm_stats.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002258
Chunming Zhou0c4e7fa2016-08-17 11:41:30 +08002259 INIT_LIST_HEAD(&adev->shadow_list);
2260 mutex_init(&adev->shadow_list_lock);
2261
Andres Rodriguez795f2812017-03-06 16:27:55 -05002262 INIT_LIST_HEAD(&adev->ring_lru_list);
2263 spin_lock_init(&adev->ring_lru_list_lock);
2264
Alex Deucher06ec9072017-12-14 15:02:39 -05002265 INIT_DELAYED_WORK(&adev->late_init_work,
2266 amdgpu_device_ip_late_init_func_handler);
Shirish S2dc80b02017-05-25 10:05:25 +05302267
Alex Xie0fa49552017-06-08 14:58:05 -04002268 /* Registers mapping */
2269 /* TODO: block userspace mapping of io register */
Ken Wangda69c1612016-01-21 19:08:55 +08002270 if (adev->asic_type >= CHIP_BONAIRE) {
2271 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
2272 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
2273 } else {
2274 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
2275 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
2276 }
Chunming Zhou5c1354b2016-08-30 16:13:10 +08002277
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002278 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
2279 if (adev->rmmio == NULL) {
2280 return -ENOMEM;
2281 }
2282 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
2283 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
2284
Christian König705e5192017-06-08 11:15:16 +02002285 /* doorbell bar mapping */
Alex Deucher06ec9072017-12-14 15:02:39 -05002286 amdgpu_device_doorbell_init(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002287
2288 /* io port mapping */
2289 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2290 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
2291 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
2292 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
2293 break;
2294 }
2295 }
2296 if (adev->rio_mem == NULL)
Amber Linb64a18c2017-01-04 08:06:58 -05002297 DRM_INFO("PCI I/O BAR is not found.\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002298
Alex Deucher5494d862018-03-09 15:14:11 -05002299 amdgpu_device_get_pcie_info(adev);
2300
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002301 /* early init functions */
Alex Deucher06ec9072017-12-14 15:02:39 -05002302 r = amdgpu_device_ip_early_init(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002303 if (r)
2304 return r;
2305
2306 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
2307 /* this will fail for cards that aren't VGA class devices, just
2308 * ignore it */
Alex Deucher06ec9072017-12-14 15:02:39 -05002309 vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002310
Alex Deuchere9bef452016-04-25 13:12:18 -04002311 if (amdgpu_device_is_px(ddev))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002312 runtime = true;
Lukas Wunner84c8b222017-03-10 21:23:45 +01002313 if (!pci_is_thunderbolt_attached(adev->pdev))
2314 vga_switcheroo_register_client(adev->pdev,
2315 &amdgpu_switcheroo_ops, runtime);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002316 if (runtime)
2317 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
2318
Shaoyun Liu9475a942018-02-01 18:13:23 -05002319 if (amdgpu_emu_mode == 1) {
2320 /* post the asic on emulation mode */
2321 emu_soc_asic_init(adev);
Shaoyun Liubfca0282018-02-01 17:37:50 -05002322 goto fence_driver_init;
Shaoyun Liu9475a942018-02-01 18:13:23 -05002323 }
Shaoyun Liubfca0282018-02-01 17:37:50 -05002324
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002325 /* Read BIOS */
Alex Deucher83ba1262016-06-03 18:21:41 -04002326 if (!amdgpu_get_bios(adev)) {
2327 r = -EINVAL;
2328 goto failed;
2329 }
Nils Wallméniusf7e9e9f2016-12-14 21:52:45 +01002330
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002331 r = amdgpu_atombios_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002332 if (r) {
2333 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002334 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
Alex Deucher83ba1262016-06-03 18:21:41 -04002335 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002336 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002337
Monk Liu4e99a442016-03-31 13:26:59 +08002338 /* detect if we are with an SRIOV vbios */
2339 amdgpu_device_detect_sriov_bios(adev);
Andres Rodriguez048765a2016-06-11 02:51:32 -04002340
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002341 /* Post card if necessary */
Alex Deucher39c640c2017-12-15 16:22:11 -05002342 if (amdgpu_device_need_post(adev)) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002343 if (!adev->bios) {
Monk Liubec86372016-09-14 19:38:08 +08002344 dev_err(adev->dev, "no vBIOS found\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04002345 r = -EINVAL;
2346 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002347 }
Monk Liubec86372016-09-14 19:38:08 +08002348 DRM_INFO("GPU posting now...\n");
Monk Liu4e99a442016-03-31 13:26:59 +08002349 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2350 if (r) {
2351 dev_err(adev->dev, "gpu post error!\n");
2352 goto failed;
2353 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002354 }
2355
Alex Deucher88b64e92017-07-10 10:43:10 -04002356 if (adev->is_atom_fw) {
2357 /* Initialize clocks */
2358 r = amdgpu_atomfirmware_get_clock_info(adev);
2359 if (r) {
2360 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002361 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
Alex Deucher88b64e92017-07-10 10:43:10 -04002362 goto failed;
2363 }
2364 } else {
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002365 /* Initialize clocks */
2366 r = amdgpu_atombios_get_clock_info(adev);
2367 if (r) {
2368 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002369 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
Gavin Wan89041942017-06-23 13:55:15 -04002370 goto failed;
Alex Deuchera5bde2f2016-09-23 16:23:41 -04002371 }
2372 /* init i2c buses */
Harry Wentland45622362017-09-12 15:58:20 -04002373 if (!amdgpu_device_has_dc_support(adev))
2374 amdgpu_atombios_i2c_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002375 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002376
Shaoyun Liubfca0282018-02-01 17:37:50 -05002377fence_driver_init:
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002378 /* Fence driver */
2379 r = amdgpu_fence_driver_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002380 if (r) {
2381 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002382 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
Alex Deucher83ba1262016-06-03 18:21:41 -04002383 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002384 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002385
2386 /* init the mode config */
2387 drm_mode_config_init(adev->ddev);
2388
Alex Deucher06ec9072017-12-14 15:02:39 -05002389 r = amdgpu_device_ip_init(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002390 if (r) {
pding8840a382017-10-23 17:22:09 +08002391 /* failed in exclusive mode due to timeout */
2392 if (amdgpu_sriov_vf(adev) &&
2393 !amdgpu_sriov_runtime(adev) &&
2394 amdgpu_virt_mmio_blocked(adev) &&
2395 !amdgpu_virt_wait_reset(adev)) {
2396 dev_err(adev->dev, "VF exclusive mode timeout\n");
Pixel Ding1daee8b2017-11-08 11:03:14 +08002397 /* Don't send request since VF is inactive. */
2398 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
2399 adev->virt.ops = NULL;
pding8840a382017-10-23 17:22:09 +08002400 r = -EAGAIN;
2401 goto failed;
2402 }
Alex Deucher06ec9072017-12-14 15:02:39 -05002403 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002404 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
Alex Deucher83ba1262016-06-03 18:21:41 -04002405 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002406 }
2407
2408 adev->accel_working = true;
2409
Alex Xiee59c0202017-06-01 09:42:59 -04002410 amdgpu_vm_check_compute_bug(adev);
2411
Marek Olšák95844d22016-08-17 23:49:27 +02002412 /* Initialize the buffer migration limit. */
2413 if (amdgpu_moverate >= 0)
2414 max_MBps = amdgpu_moverate;
2415 else
2416 max_MBps = 8; /* Allow 8 MB/s. */
2417 /* Get a log2 for easy divisions. */
2418 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
2419
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002420 r = amdgpu_ib_pool_init(adev);
2421 if (r) {
2422 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
Alex Deuchere23b74a2017-09-28 09:47:32 -04002423 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
Alex Deucher83ba1262016-06-03 18:21:41 -04002424 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002425 }
2426
Horace Chen2dc8f812017-10-09 16:17:16 +08002427 if (amdgpu_sriov_vf(adev))
2428 amdgpu_virt_init_data_exchange(adev);
2429
Monk Liu9bc92b92017-02-08 17:38:13 +08002430 amdgpu_fbdev_init(adev);
2431
Rex Zhud2f52ac2017-09-22 17:47:27 +08002432 r = amdgpu_pm_sysfs_init(adev);
2433 if (r)
2434 DRM_ERROR("registering pm debugfs failed (%d).\n", r);
2435
Alex Deucher75758252017-12-14 15:23:14 -05002436 r = amdgpu_debugfs_gem_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002437 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002438 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002439
2440 r = amdgpu_debugfs_regs_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002441 if (r)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002442 DRM_ERROR("registering register debugfs failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002443
Huang Rui50ab2532016-06-12 15:51:09 +08002444 r = amdgpu_debugfs_firmware_init(adev);
Monk Liu3f14e622017-02-09 13:42:27 +08002445 if (r)
Huang Rui50ab2532016-06-12 15:51:09 +08002446 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
Huang Rui50ab2532016-06-12 15:51:09 +08002447
Christian König763efb62017-12-06 15:44:51 +01002448 r = amdgpu_debugfs_init(adev);
Kent Russelldb95e212017-08-22 12:31:43 -04002449 if (r)
Christian König763efb62017-12-06 15:44:51 +01002450 DRM_ERROR("Creating debugfs files failed (%d).\n", r);
Kent Russelldb95e212017-08-22 12:31:43 -04002451
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002452 if ((amdgpu_testing & 1)) {
2453 if (adev->accel_working)
2454 amdgpu_test_moves(adev);
2455 else
2456 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
2457 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002458 if (amdgpu_benchmarking) {
2459 if (adev->accel_working)
2460 amdgpu_benchmark(adev, amdgpu_benchmarking);
2461 else
2462 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
2463 }
2464
2465 /* enable clockgating, etc. after ib tests, etc. since some blocks require
2466 * explicit gating rather than handling it automatically.
2467 */
Alex Deucher06ec9072017-12-14 15:02:39 -05002468 r = amdgpu_device_ip_late_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05002469 if (r) {
Alex Deucher06ec9072017-12-14 15:02:39 -05002470 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
Alex Deuchere23b74a2017-09-28 09:47:32 -04002471 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
Alex Deucher83ba1262016-06-03 18:21:41 -04002472 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05002473 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002474
2475 return 0;
Alex Deucher83ba1262016-06-03 18:21:41 -04002476
2477failed:
Gavin Wan89041942017-06-23 13:55:15 -04002478 amdgpu_vf_error_trans_all(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002479 if (runtime)
2480 vga_switcheroo_fini_domain_pm_ops(adev->dev);
pding8840a382017-10-23 17:22:09 +08002481
Alex Deucher83ba1262016-06-03 18:21:41 -04002482 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002483}
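
/*
 * A minimal sketch (illustrative only, kept out of the build) of the
 * migration-rate bookkeeping set up in amdgpu_device_init() above:
 * because ilog2(max_MBps) is stored, a byte budget reduces to shifts
 * instead of multiplies (ilog2 floors, so this is a power-of-two
 * approximation). The helper name is made up.
 */
#if 0
static u64 example_move_budget_bytes(struct amdgpu_device *adev, u64 seconds)
{
	/* seconds * max_MBps megabytes, then megabytes -> bytes */
	return (seconds << adev->mm_stats.log2_max_MBps) << 20;
}
#endif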
2484
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002485/**
2486 * amdgpu_device_fini - tear down the driver
2487 *
2488 * @adev: amdgpu_device pointer
2489 *
2490 * Tear down the driver info (all asics).
2491 * Called at driver shutdown.
2492 */
2493void amdgpu_device_fini(struct amdgpu_device *adev)
2494{
2495 int r;
2496
2497 DRM_INFO("amdgpu: finishing device.\n");
2498 adev->shutdown = true;
Mikita Lipskie5b03032018-03-15 16:53:08 -04002499 /* disable all interrupts */
2500 amdgpu_irq_disable_all(adev);
Mikita Lipskiff97cba2018-03-14 13:41:29 -04002501 if (adev->mode_info.mode_config_initialized){
2502 if (!amdgpu_device_has_dc_support(adev))
2503 drm_crtc_force_disable_all(adev->ddev);
2504 else
2505 drm_atomic_helper_shutdown(adev->ddev);
2506 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002507 amdgpu_ib_pool_fini(adev);
2508 amdgpu_fence_driver_fini(adev);
Emily Deng58e955d2018-03-08 09:35:19 +08002509 amdgpu_pm_sysfs_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002510 amdgpu_fbdev_fini(adev);
Alex Deucher06ec9072017-12-14 15:02:39 -05002511 r = amdgpu_device_ip_fini(adev);
Huang Ruiab4fe3e2017-06-05 22:11:59 +08002512 if (adev->firmware.gpu_info_fw) {
2513 release_firmware(adev->firmware.gpu_info_fw);
2514 adev->firmware.gpu_info_fw = NULL;
2515 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002516 adev->accel_working = false;
Shirish S2dc80b02017-05-25 10:05:25 +05302517 cancel_delayed_work_sync(&adev->late_init_work);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002518 /* free i2c buses */
Harry Wentland45622362017-09-12 15:58:20 -04002519 if (!amdgpu_device_has_dc_support(adev))
2520 amdgpu_i2c_fini(adev);
Shaoyun Liubfca0282018-02-01 17:37:50 -05002521
2522 if (amdgpu_emu_mode != 1)
2523 amdgpu_atombios_fini(adev);
2524
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002525 kfree(adev->bios);
2526 adev->bios = NULL;
Lukas Wunner84c8b222017-03-10 21:23:45 +01002527 if (!pci_is_thunderbolt_attached(adev->pdev))
2528 vga_switcheroo_unregister_client(adev->pdev);
Alex Deucher83ba1262016-06-03 18:21:41 -04002529 if (adev->flags & AMD_IS_PX)
2530 vga_switcheroo_fini_domain_pm_ops(adev->dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002531 vga_client_register(adev->pdev, NULL, NULL, NULL);
2532 if (adev->rio_mem)
2533 pci_iounmap(adev->pdev, adev->rio_mem);
2534 adev->rio_mem = NULL;
2535 iounmap(adev->rmmio);
2536 adev->rmmio = NULL;
Alex Deucher06ec9072017-12-14 15:02:39 -05002537 amdgpu_device_doorbell_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002538 amdgpu_debugfs_regs_cleanup(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002539}
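/*
 * Illustrative sketch (not part of this file): amdgpu_device_fini() is
 * normally reached from the driver's KMS unload path once all userspace
 * handles are gone.  The function name and surrounding ordering below are
 * assumptions, shown only to make the teardown contract concrete.
 */
#if 0
static void example_driver_unload(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	/* interrupts, fences, IP blocks and MMIO mappings all go down here */
	amdgpu_device_fini(adev);
	kfree(adev);
	dev->dev_private = NULL;
}
#endif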
2540
2541
2542/*
2543 * Suspend & resume.
2544 */
2545/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002546 * amdgpu_device_suspend - initiate device suspend
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002547 *
2548 * @dev: drm dev pointer
2549 * @suspend: suspend state
2550 *
2551 * Puts the hw in the suspend state (all asics).
2552 * Returns 0 for success or an error on failure.
2553 * Called at driver suspend.
2554 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002555int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002556{
2557 struct amdgpu_device *adev;
2558 struct drm_crtc *crtc;
2559 struct drm_connector *connector;
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002560 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002561
2562 if (dev == NULL || dev->dev_private == NULL) {
2563 return -ENODEV;
2564 }
2565
2566 adev = dev->dev_private;
2567
2568 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2569 return 0;
2570
2571 drm_kms_helper_poll_disable(dev);
2572
Harry Wentland45622362017-09-12 15:58:20 -04002573 if (!amdgpu_device_has_dc_support(adev)) {
2574 /* turn off display hw */
2575 drm_modeset_lock_all(dev);
2576 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2577 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
2578 }
2579 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002580 }
2581
Yong Zhaoba997702015-11-09 17:21:45 -05002582 amdgpu_amdkfd_suspend(adev);
2583
Alex Deucher756e6882015-10-08 00:03:36 -04002584 /* unpin the front buffers and cursors */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002585 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Alex Deucher756e6882015-10-08 00:03:36 -04002586 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
Daniel Stonee68d14d2018-03-30 15:11:38 +01002587 struct drm_framebuffer *fb = crtc->primary->fb;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002588 struct amdgpu_bo *robj;
2589
Alex Deucher756e6882015-10-08 00:03:36 -04002590 if (amdgpu_crtc->cursor_bo) {
2591 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
Alex Xie7a6901d2017-04-24 13:52:41 -04002592 r = amdgpu_bo_reserve(aobj, true);
Alex Deucher756e6882015-10-08 00:03:36 -04002593 if (r == 0) {
2594 amdgpu_bo_unpin(aobj);
2595 amdgpu_bo_unreserve(aobj);
2596 }
2597 }
2598
Daniel Stonee68d14d2018-03-30 15:11:38 +01002599 if (fb == NULL || fb->obj[0] == NULL) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002600 continue;
2601 }
Daniel Stonee68d14d2018-03-30 15:11:38 +01002602 robj = gem_to_amdgpu_bo(fb->obj[0]);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002603 /* don't unpin kernel fb objects */
2604 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
Alex Xie7a6901d2017-04-24 13:52:41 -04002605 r = amdgpu_bo_reserve(robj, true);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002606 if (r == 0) {
2607 amdgpu_bo_unpin(robj);
2608 amdgpu_bo_unreserve(robj);
2609 }
2610 }
2611 }
2612 /* evict vram memory */
2613 amdgpu_bo_evict_vram(adev);
2614
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002615 amdgpu_fence_driver_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002616
Alex Deuchercdd61df2017-12-14 16:47:40 -05002617 r = amdgpu_device_ip_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002618
Alex Deuchera0a71e42016-10-10 12:41:36 -04002619 /* evict remaining vram memory
2620 * This second call to evict vram is to evict the gart page table
2621 * using the CPU.
2622 */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002623 amdgpu_bo_evict_vram(adev);
2624
2625 pci_save_state(dev->pdev);
2626 if (suspend) {
2627 /* Shut down the device */
2628 pci_disable_device(dev->pdev);
2629 pci_set_power_state(dev->pdev, PCI_D3hot);
jimqu74b0b152016-09-07 17:09:12 +08002630 } else {
2631 r = amdgpu_asic_reset(adev);
2632 if (r)
2633 DRM_ERROR("amdgpu asic reset failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002634 }
2635
2636 if (fbcon) {
2637 console_lock();
2638 amdgpu_fbdev_set_suspend(adev, 1);
2639 console_unlock();
2640 }
2641 return 0;
2642}
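/*
 * Illustrative sketch: a dev_pm_ops system-sleep hook is the usual caller
 * of amdgpu_device_suspend().  example_pmops_suspend() is an assumed name;
 * the argument pattern mirrors how the real PM callbacks drive it.
 */
#if 0
static int example_pmops_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	/* suspend=true: program the PCI device to D3hot;
	 * fbcon=true: also suspend the fbdev console */
	return amdgpu_device_suspend(drm_dev, true, true);
}
#endif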
2643
2644/**
Alex Deucher810ddc32016-08-23 13:25:49 -04002645 * amdgpu_device_resume - initiate device resume
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002646 *
2647 * @dev: drm dev pointer
2648 *
2649 * Bring the hw back to operating state (all asics).
2650 * Returns 0 for success or an error on failure.
2651 * Called at driver resume.
2652 */
Alex Deucher810ddc32016-08-23 13:25:49 -04002653int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002654{
2655 struct drm_connector *connector;
2656 struct amdgpu_device *adev = dev->dev_private;
Alex Deucher756e6882015-10-08 00:03:36 -04002657 struct drm_crtc *crtc;
Huang Rui03161a62017-04-13 16:12:26 +08002658 int r = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002659
2660 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2661 return 0;
2662
jimqu74b0b152016-09-07 17:09:12 +08002663 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002664 console_lock();
jimqu74b0b152016-09-07 17:09:12 +08002665
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002666 if (resume) {
2667 pci_set_power_state(dev->pdev, PCI_D0);
2668 pci_restore_state(dev->pdev);
jimqu74b0b152016-09-07 17:09:12 +08002669 r = pci_enable_device(dev->pdev);
Huang Rui03161a62017-04-13 16:12:26 +08002670 if (r)
2671 goto unlock;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002672 }
2673
2674 /* post card */
Alex Deucher39c640c2017-12-15 16:22:11 -05002675 if (amdgpu_device_need_post(adev)) {
jimqu74b0b152016-09-07 17:09:12 +08002676 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
2677 if (r)
2678 DRM_ERROR("amdgpu asic init failed\n");
2679 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002680
Alex Deucher06ec9072017-12-14 15:02:39 -05002681 r = amdgpu_device_ip_resume(adev);
Rex Zhue6707212017-03-30 13:21:01 +08002682 if (r) {
Alex Deucher06ec9072017-12-14 15:02:39 -05002683 DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
Huang Rui03161a62017-04-13 16:12:26 +08002684 goto unlock;
Rex Zhue6707212017-03-30 13:21:01 +08002685 }
Alex Deucher5ceb54c2015-08-05 12:41:48 -04002686 amdgpu_fence_driver_resume(adev);
2687
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002688
Alex Deucher06ec9072017-12-14 15:02:39 -05002689 r = amdgpu_device_ip_late_init(adev);
Huang Rui03161a62017-04-13 16:12:26 +08002690 if (r)
2691 goto unlock;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002692
Alex Deucher756e6882015-10-08 00:03:36 -04002693 /* pin cursors */
2694 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2695 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2696
2697 if (amdgpu_crtc->cursor_bo) {
2698 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
Alex Xie7a6901d2017-04-24 13:52:41 -04002699 r = amdgpu_bo_reserve(aobj, true);
Alex Deucher756e6882015-10-08 00:03:36 -04002700 if (r == 0) {
2701 r = amdgpu_bo_pin(aobj,
2702 AMDGPU_GEM_DOMAIN_VRAM,
2703 &amdgpu_crtc->cursor_addr);
2704 if (r != 0)
2705 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2706 amdgpu_bo_unreserve(aobj);
2707 }
2708 }
2709 }
Yong Zhaoba997702015-11-09 17:21:45 -05002710 r = amdgpu_amdkfd_resume(adev);
2711 if (r)
2712		goto unlock;
Alex Deucher756e6882015-10-08 00:03:36 -04002713
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002714 /* blat the mode back in */
2715 if (fbcon) {
Harry Wentland45622362017-09-12 15:58:20 -04002716 if (!amdgpu_device_has_dc_support(adev)) {
2717 /* pre DCE11 */
2718 drm_helper_resume_force_mode(dev);
2719
2720 /* turn on display hw */
2721 drm_modeset_lock_all(dev);
2722 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2723 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2724 }
2725 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002726 }
2727 }
2728
2729 drm_kms_helper_poll_enable(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002730
2731 /*
2732 * Most of the connector probing functions try to acquire runtime pm
2733 * refs to ensure that the GPU is powered on when connector polling is
2734 * performed. Since we're calling this from a runtime PM callback,
2735 * trying to acquire rpm refs will cause us to deadlock.
2736 *
2737 * Since we're guaranteed to be holding the rpm lock, it's safe to
2738 * temporarily disable the rpm helpers so this doesn't deadlock us.
2739 */
2740#ifdef CONFIG_PM
2741 dev->dev->power.disable_depth++;
2742#endif
Harry Wentland45622362017-09-12 15:58:20 -04002743 if (!amdgpu_device_has_dc_support(adev))
2744 drm_helper_hpd_irq_event(dev);
2745 else
2746 drm_kms_helper_hotplug_event(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002747#ifdef CONFIG_PM
2748 dev->dev->power.disable_depth--;
2749#endif
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002750
Huang Rui03161a62017-04-13 16:12:26 +08002751 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002752 amdgpu_fbdev_set_suspend(adev, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002753
Huang Rui03161a62017-04-13 16:12:26 +08002754unlock:
2755 if (fbcon)
2756 console_unlock();
2757
2758 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002759}
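/*
 * Illustrative sketch: the matching resume hook.  example_pmops_resume()
 * is an assumed name; resume=true re-enables the PCI device before any
 * hardware access, mirroring the suspend sketch above.
 */
#if 0
static int example_pmops_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_resume(drm_dev, true, true);
}
#endif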
2760
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002761/**
2762 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
2763 *
2764 * @adev: amdgpu_device pointer
2765 *
2766 * The list of all the hardware IPs that make up the asic is walked and
2767 * the check_soft_reset callbacks are run. check_soft_reset determines
2768 * if the asic is still hung or not.
2769 * Returns true if any of the IPs are still in a hung state, false if not.
2770 */
Alex Deucher06ec9072017-12-14 15:02:39 -05002771static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
Chunming Zhou63fbf422016-07-15 11:19:20 +08002772{
2773 int i;
2774 bool asic_hang = false;
2775
Monk Liuf993d622017-10-16 19:46:01 +08002776 if (amdgpu_sriov_vf(adev))
2777 return true;
2778
Alex Deucher8bc04c22018-03-29 14:48:37 -05002779 if (amdgpu_asic_need_full_reset(adev))
2780 return true;
2781
Chunming Zhou63fbf422016-07-15 11:19:20 +08002782 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002783 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou63fbf422016-07-15 11:19:20 +08002784 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002785 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
2786 adev->ip_blocks[i].status.hang =
2787 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
2788 if (adev->ip_blocks[i].status.hang) {
2789 DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
Chunming Zhou63fbf422016-07-15 11:19:20 +08002790 asic_hang = true;
2791 }
2792 }
2793 return asic_hang;
2794}
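/*
 * Illustrative sketch: what a single IP block's check_soft_reset callback
 * might look like.  The register and mask are hypothetical; the only
 * contract is "return true while the block is still hung".
 */
#if 0
static bool example_ip_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 status = RREG32(mmEXAMPLE_STATUS);	/* hypothetical register */

	/* report a hang while any busy bit is still set */
	return (status & EXAMPLE_BUSY_MASK) != 0;
}
#endif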
2795
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002796/**
2797 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
2798 *
2799 * @adev: amdgpu_device pointer
2800 *
2801 * The list of all the hardware IPs that make up the asic is walked and the
2802 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
2803 * handles any IP specific hardware or software state changes that are
2804 * necessary for a soft reset to succeed.
2805 * Returns 0 on success, negative error code on failure.
2806 */
Alex Deucher06ec9072017-12-14 15:02:39 -05002807static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002808{
2809 int i, r = 0;
2810
2811 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002812 if (!adev->ip_blocks[i].status.valid)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002813 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002814 if (adev->ip_blocks[i].status.hang &&
2815 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
2816 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
Chunming Zhoud31a5012016-07-18 10:04:34 +08002817 if (r)
2818 return r;
2819 }
2820 }
2821
2822 return 0;
2823}
2824
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002825/**
2826 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
2827 *
2828 * @adev: amdgpu_device pointer
2829 *
2830 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
2831 * reset is necessary to recover.
2832 * Returns true if a full asic reset is required, false if not.
2833 */
Alex Deucher06ec9072017-12-14 15:02:39 -05002834static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002835{
Alex Deucherda146d32016-10-13 16:07:03 -04002836 int i;
2837
Alex Deucher8bc04c22018-03-29 14:48:37 -05002838 if (amdgpu_asic_need_full_reset(adev))
2839 return true;
2840
Alex Deucherda146d32016-10-13 16:07:03 -04002841 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002842 if (!adev->ip_blocks[i].status.valid)
Alex Deucherda146d32016-10-13 16:07:03 -04002843 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002844 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
2845 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
2846 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
Ken Wang98512bb2017-09-14 16:25:19 +08002847 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
2848 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
Alex Deuchera1255102016-10-13 17:41:13 -04002849 if (adev->ip_blocks[i].status.hang) {
Alex Deucherda146d32016-10-13 16:07:03 -04002850			DRM_INFO("Some blocks need full reset!\n");
2851 return true;
2852 }
2853 }
Chunming Zhou35d782f2016-07-15 15:57:13 +08002854 }
2855 return false;
2856}
2857
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002858/**
2859 * amdgpu_device_ip_soft_reset - do a soft reset
2860 *
2861 * @adev: amdgpu_device pointer
2862 *
2863 * The list of all the hardware IPs that make up the asic is walked and the
2864 * soft_reset callbacks are run if the block is hung. soft_reset handles any
2865 * IP specific hardware or software state changes that are necessary to soft
2866 * reset the IP.
2867 * Returns 0 on success, negative error code on failure.
2868 */
Alex Deucher06ec9072017-12-14 15:02:39 -05002869static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002870{
2871 int i, r = 0;
2872
2873 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002874 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002875 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002876 if (adev->ip_blocks[i].status.hang &&
2877 adev->ip_blocks[i].version->funcs->soft_reset) {
2878 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002879 if (r)
2880 return r;
2881 }
2882 }
2883
2884 return 0;
2885}
2886
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002887/**
2888 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
2889 *
2890 * @adev: amdgpu_device pointer
2891 *
2892 * The list of all the hardware IPs that make up the asic is walked and the
2893 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
2894 * handles any IP specific hardware or software state changes that are
2895 * necessary after the IP has been soft reset.
2896 * Returns 0 on success, negative error code on failure.
2897 */
Alex Deucher06ec9072017-12-14 15:02:39 -05002898static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002899{
2900 int i, r = 0;
2901
2902 for (i = 0; i < adev->num_ip_blocks; i++) {
Alex Deuchera1255102016-10-13 17:41:13 -04002903 if (!adev->ip_blocks[i].status.valid)
Chunming Zhou35d782f2016-07-15 15:57:13 +08002904 continue;
Alex Deuchera1255102016-10-13 17:41:13 -04002905 if (adev->ip_blocks[i].status.hang &&
2906 adev->ip_blocks[i].version->funcs->post_soft_reset)
2907 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08002908 if (r)
2909 return r;
2910 }
2911
2912 return 0;
2913}
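/*
 * Illustrative sketch: the four soft-reset hooks walked above plug into an
 * IP block's amd_ip_funcs table.  The "example" block and handler names
 * are hypothetical; only the member names come from the real structure.
 */
#if 0
static const struct amd_ip_funcs example_ip_funcs = {
	.name			= "example",
	.check_soft_reset	= example_ip_check_soft_reset,
	.pre_soft_reset		= example_ip_pre_soft_reset,
	.soft_reset		= example_ip_soft_reset,
	.post_soft_reset	= example_ip_post_soft_reset,
};
#endif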
2914
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002915/**
2916 * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
2917 *
2918 * @adev: amdgpu_device pointer
2919 * @ring: amdgpu_ring for the engine handling the buffer operations
2920 * @bo: amdgpu_bo buffer whose shadow is being restored
2921 * @fence: dma_fence associated with the operation
2922 *
2923 * Restores the VRAM buffer contents from the shadow in GTT. Used to
2924 * restore things like GPUVM page tables after a GPU reset where
2925 * the contents of VRAM might be lost.
2926 * Returns 0 on success, negative error code on failure.
2927 */
Alex Deucher06ec9072017-12-14 15:02:39 -05002928static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
2929 struct amdgpu_ring *ring,
2930 struct amdgpu_bo *bo,
2931 struct dma_fence **fence)
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002932{
2933 uint32_t domain;
2934 int r;
2935
Roger.He23d2e502017-04-21 14:24:26 +08002936 if (!bo->shadow)
2937 return 0;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002938
Alex Xie1d284792017-04-24 13:53:04 -04002939 r = amdgpu_bo_reserve(bo, true);
Roger.He23d2e502017-04-21 14:24:26 +08002940 if (r)
2941 return r;
2942 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2943 /* if bo has been evicted, then no need to recover */
2944 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
Roger.He82521312017-04-21 13:08:43 +08002945 r = amdgpu_bo_validate(bo->shadow);
2946 if (r) {
2947 DRM_ERROR("bo validate failed!\n");
2948 goto err;
2949 }
2950
Roger.He23d2e502017-04-21 14:24:26 +08002951 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002952 NULL, fence, true);
Roger.He23d2e502017-04-21 14:24:26 +08002953 if (r) {
2954 DRM_ERROR("recover page table failed!\n");
2955 goto err;
2956 }
2957 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002958err:
Roger.He23d2e502017-04-21 14:24:26 +08002959 amdgpu_bo_unreserve(bo);
2960 return r;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002961}
2962
Alex Deuchere3ecdff2018-03-15 17:39:45 -05002963/**
2964 * amdgpu_device_handle_vram_lost - Handle the loss of VRAM contents
2965 *
2966 * @adev: amdgpu_device pointer
2967 *
2968 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
2969 * restore things like GPUVM page tables after a GPU reset where
2970 * the contents of VRAM might be lost.
2971 * Returns 0 on success, 1 on failure.
2972 */
Monk Liuc41d1cf2017-12-25 11:59:27 +08002973static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
2974{
2975 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2976 struct amdgpu_bo *bo, *tmp;
2977 struct dma_fence *fence = NULL, *next = NULL;
2978 long r = 1;
2979 int i = 0;
2980 long tmo;
2981
2982 if (amdgpu_sriov_runtime(adev))
2983 tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
2984 else
2985 tmo = msecs_to_jiffies(100);
2986
2987 DRM_INFO("recover vram bo from shadow start\n");
2988 mutex_lock(&adev->shadow_list_lock);
2989 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2990 next = NULL;
2991 amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
2992 if (fence) {
2993 r = dma_fence_wait_timeout(fence, false, tmo);
2994 if (r == 0)
2995 pr_err("wait fence %p[%d] timeout\n", fence, i);
2996 else if (r < 0)
2997 pr_err("wait fence %p[%d] interrupted\n", fence, i);
2998 if (r < 1) {
2999 dma_fence_put(fence);
3000 fence = next;
3001 break;
3002 }
3003 i++;
3004 }
3005
3006 dma_fence_put(fence);
3007 fence = next;
3008 }
3009 mutex_unlock(&adev->shadow_list_lock);
3010
3011 if (fence) {
3012 r = dma_fence_wait_timeout(fence, false, tmo);
3013 if (r == 0)
3014 pr_err("wait fence %p[%d] timeout\n", fence, i);
3015 else if (r < 0)
3016 pr_err("wait fence %p[%d] interrupted\n", fence, i);
3017
3018 }
3019 dma_fence_put(fence);
3020
3021 if (r > 0)
3022 DRM_INFO("recover vram bo from shadow done\n");
3023 else
3024 DRM_ERROR("recover vram bo from shadow failed\n");
3025
Alex Deuchere3ecdff2018-03-15 17:39:45 -05003026 return (r > 0) ? 0 : 1;
Monk Liuc41d1cf2017-12-25 11:59:27 +08003027}
3028
Alex Deuchere3ecdff2018-03-15 17:39:45 -05003029/**
Alex Deucher06ec9072017-12-14 15:02:39 -05003030 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
Monk Liua90ad3c2017-01-23 14:22:08 +08003031 *
3032 * @adev: amdgpu device pointer
Monk Liua90ad3c2017-01-23 14:22:08 +08003033 *
Monk Liu57406822017-10-25 16:37:02 +08003034 * Attempts a soft reset or, if necessary, a full reset, and reinitializes the ASIC.
3035 * Returns 0 on success, negative error code on failure.
Alex Deuchere3ecdff2018-03-15 17:39:45 -05003036 */
Monk Liuc41d1cf2017-12-25 11:59:27 +08003037static int amdgpu_device_reset(struct amdgpu_device *adev)
Monk Liua90ad3c2017-01-23 14:22:08 +08003038{
Monk Liu57406822017-10-25 16:37:02 +08003039 bool need_full_reset, vram_lost = 0;
3040 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003041
Alex Deucher06ec9072017-12-14 15:02:39 -05003042 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
Chunming Zhou35d782f2016-07-15 15:57:13 +08003043
3044 if (!need_full_reset) {
Alex Deucher06ec9072017-12-14 15:02:39 -05003045 amdgpu_device_ip_pre_soft_reset(adev);
3046 r = amdgpu_device_ip_soft_reset(adev);
3047 amdgpu_device_ip_post_soft_reset(adev);
3048 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
Chunming Zhou35d782f2016-07-15 15:57:13 +08003049			DRM_INFO("soft reset failed, will fall back to full reset!\n");
3050 need_full_reset = true;
3051 }
3052 }
3053
3054 if (need_full_reset) {
Alex Deuchercdd61df2017-12-14 16:47:40 -05003055 r = amdgpu_device_ip_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003056
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003057retry:
Chunming Zhou35d782f2016-07-15 15:57:13 +08003058 r = amdgpu_asic_reset(adev);
3059 /* post card */
3060 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherbfa99262016-01-15 11:59:48 -05003061
Chunming Zhou35d782f2016-07-15 15:57:13 +08003062 if (!r) {
3063 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
Alex Deucher06ec9072017-12-14 15:02:39 -05003064 r = amdgpu_device_ip_resume_phase1(adev);
Chunming Zhoufcf06492017-05-05 10:33:33 +08003065 if (r)
3066 goto out;
Monk Liu57406822017-10-25 16:37:02 +08003067
Alex Deucher06ec9072017-12-14 15:02:39 -05003068 vram_lost = amdgpu_device_check_vram_lost(adev);
Chunming Zhouf1892132017-05-15 16:48:27 +08003069 if (vram_lost) {
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08003070 DRM_ERROR("VRAM is lost!\n");
Chunming Zhouf1892132017-05-15 16:48:27 +08003071 atomic_inc(&adev->vram_lost_counter);
3072 }
Monk Liu57406822017-10-25 16:37:02 +08003073
Christian Königc1c7ce82017-10-16 16:50:32 +02003074 r = amdgpu_gtt_mgr_recover(
3075 &adev->mman.bdev.man[TTM_PL_TT]);
Chunming Zhou2c0d7312016-08-30 16:36:25 +08003076 if (r)
Chunming Zhoufcf06492017-05-05 10:33:33 +08003077 goto out;
Monk Liu57406822017-10-25 16:37:02 +08003078
Alex Deucher06ec9072017-12-14 15:02:39 -05003079 r = amdgpu_device_ip_resume_phase2(adev);
Chunming Zhoufcf06492017-05-05 10:33:33 +08003080 if (r)
3081 goto out;
Monk Liu57406822017-10-25 16:37:02 +08003082
Chunming Zhou0c49e0b2017-05-15 14:20:00 +08003083 if (vram_lost)
Alex Deucher06ec9072017-12-14 15:02:39 -05003084 amdgpu_device_fill_reset_magic(adev);
Chunming Zhou2c0d7312016-08-30 16:36:25 +08003085 }
Chunming Zhoufcf06492017-05-05 10:33:33 +08003086 }
Monk Liu57406822017-10-25 16:37:02 +08003087
Chunming Zhoufcf06492017-05-05 10:33:33 +08003088out:
3089 if (!r) {
3090 amdgpu_irq_gpu_reset_resume_helper(adev);
Chunming Zhou1f465082016-06-30 15:02:26 +08003091 r = amdgpu_ib_ring_tests(adev);
3092 if (r) {
3093 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
Alex Deuchercdd61df2017-12-14 16:47:40 -05003094 r = amdgpu_device_ip_suspend(adev);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08003095 need_full_reset = true;
Chunming Zhou40019dc2016-06-29 16:01:49 +08003096 goto retry;
Chunming Zhou1f465082016-06-30 15:02:26 +08003097 }
Monk Liu57406822017-10-25 16:37:02 +08003098 }
3099
Monk Liuc41d1cf2017-12-25 11:59:27 +08003100 if (!r && ((need_full_reset && !(adev->flags & AMD_IS_APU)) || vram_lost))
3101 r = amdgpu_device_handle_vram_lost(adev);
Monk Liu57406822017-10-25 16:37:02 +08003102
3103 return r;
3104}
3105
Alex Deuchere3ecdff2018-03-15 17:39:45 -05003106/**
Alex Deucher06ec9072017-12-14 15:02:39 -05003107 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
Monk Liu57406822017-10-25 16:37:02 +08003108 *
3109 * @adev: amdgpu device pointer
Monk Liu57406822017-10-25 16:37:02 +08003110 *
3111 * Performs a VF FLR and reinitializes the ASIC.
3112 * Returns 0 on success, negative error code on failure.
Alex Deuchere3ecdff2018-03-15 17:39:45 -05003113 */
3114static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3115 bool from_hypervisor)
Monk Liu57406822017-10-25 16:37:02 +08003116{
3117 int r;
3118
3119 if (from_hypervisor)
3120 r = amdgpu_virt_request_full_gpu(adev, true);
3121 else
3122 r = amdgpu_virt_reset_gpu(adev);
3123 if (r)
3124 return r;
3125
3126 /* Resume IP prior to SMC */
Alex Deucher06ec9072017-12-14 15:02:39 -05003127 r = amdgpu_device_ip_reinit_early_sriov(adev);
Monk Liu57406822017-10-25 16:37:02 +08003128 if (r)
3129 goto error;
3130
3131	/* we need to recover the GART table prior to resuming SMC/CP/SDMA */
Christian Königc1c7ce82017-10-16 16:50:32 +02003132 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
Monk Liu57406822017-10-25 16:37:02 +08003133
3134 /* now we are okay to resume SMC/CP/SDMA */
Alex Deucher06ec9072017-12-14 15:02:39 -05003135 r = amdgpu_device_ip_reinit_late_sriov(adev);
Monk Liuc41d1cf2017-12-25 11:59:27 +08003136 amdgpu_virt_release_full_gpu(adev, true);
Monk Liu57406822017-10-25 16:37:02 +08003137 if (r)
3138 goto error;
3139
3140 amdgpu_irq_gpu_reset_resume_helper(adev);
3141 r = amdgpu_ib_ring_tests(adev);
Monk Liuc41d1cf2017-12-25 11:59:27 +08003142
3143 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3144 atomic_inc(&adev->vram_lost_counter);
3145 r = amdgpu_device_handle_vram_lost(adev);
3146 }
Monk Liu57406822017-10-25 16:37:02 +08003147
3148error:
Monk Liu57406822017-10-25 16:37:02 +08003149
3150 return r;
3151}
3152
3153/**
Alex Deucher5f152b52017-12-15 16:40:49 -05003154 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
Monk Liu57406822017-10-25 16:37:02 +08003155 *
3156 * @adev: amdgpu device pointer
3157 * @job: which job triggered the hang
Andrey Grodzovskydcebf022017-12-12 14:09:30 -05003158 * @force: forces reset regardless of amdgpu_gpu_recovery
Monk Liu57406822017-10-25 16:37:02 +08003159 *
3160 * Attempt to reset the GPU if it has hung (all asics).
3161 * Returns 0 for success or an error on failure.
3162 */
Alex Deucher5f152b52017-12-15 16:40:49 -05003163int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
3164 struct amdgpu_job *job, bool force)
Monk Liu57406822017-10-25 16:37:02 +08003165{
3166 struct drm_atomic_state *state = NULL;
Monk Liu57406822017-10-25 16:37:02 +08003167 int i, r, resched;
3168
Andrey Grodzovsky54bc1392018-01-19 17:23:08 -05003169 if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
Monk Liu57406822017-10-25 16:37:02 +08003170 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
3171 return 0;
3172 }
3173
Andrey Grodzovskydcebf022017-12-12 14:09:30 -05003174 if (!force && (amdgpu_gpu_recovery == 0 ||
3175 (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
3176 DRM_INFO("GPU recovery disabled.\n");
3177 return 0;
3178 }
3179
Monk Liu57406822017-10-25 16:37:02 +08003180 dev_info(adev->dev, "GPU reset begin!\n");
3181
Monk Liu13a752e2017-10-17 15:11:12 +08003182 mutex_lock(&adev->lock_reset);
Monk Liu57406822017-10-25 16:37:02 +08003183 atomic_inc(&adev->gpu_reset_counter);
Monk Liu13a752e2017-10-17 15:11:12 +08003184 adev->in_gpu_reset = 1;
Monk Liu57406822017-10-25 16:37:02 +08003185
3186 /* block TTM */
3187 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
Monk Liu71182662017-12-25 15:14:58 +08003188
Monk Liu57406822017-10-25 16:37:02 +08003189 /* store modesetting */
3190 if (amdgpu_device_has_dc_support(adev))
3191 state = drm_atomic_helper_suspend(adev->ddev);
3192
Monk Liu71182662017-12-25 15:14:58 +08003193 /* block all schedulers and reset given job's ring */
Monk Liu57406822017-10-25 16:37:02 +08003194 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3195 struct amdgpu_ring *ring = adev->rings[i];
3196
3197 if (!ring || !ring->sched.thread)
3198 continue;
3199
Monk Liu71182662017-12-25 15:14:58 +08003200 kthread_park(ring->sched.thread);
3201
Monk Liu57406822017-10-25 16:37:02 +08003202 if (job && job->ring->idx != i)
3203 continue;
3204
Lucas Stach1b1f42d2017-12-06 17:49:39 +01003205 drm_sched_hw_job_reset(&ring->sched, &job->base);
Monk Liu57406822017-10-25 16:37:02 +08003206
3207 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
3208 amdgpu_fence_driver_force_completion(ring);
3209 }
3210
3211 if (amdgpu_sriov_vf(adev))
Monk Liuc41d1cf2017-12-25 11:59:27 +08003212 r = amdgpu_device_reset_sriov(adev, job ? false : true);
Monk Liu57406822017-10-25 16:37:02 +08003213 else
Monk Liuc41d1cf2017-12-25 11:59:27 +08003214 r = amdgpu_device_reset(adev);
Monk Liu57406822017-10-25 16:37:02 +08003215
Monk Liu71182662017-12-25 15:14:58 +08003216 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
3217 struct amdgpu_ring *ring = adev->rings[i];
Chunming Zhou51687752017-04-24 17:09:15 +08003218
Monk Liu71182662017-12-25 15:14:58 +08003219 if (!ring || !ring->sched.thread)
3220 continue;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08003221
Monk Liu71182662017-12-25 15:14:58 +08003222		/* only need to recover the scheduler of the given job's ring,
3223		 * or of all rings (in the case @job is NULL),
3224		 * after the reset above has completed
3225 */
3226 if ((!job || job->ring->idx == i) && !r)
Lucas Stach1b1f42d2017-12-06 17:49:39 +01003227 drm_sched_job_recovery(&ring->sched);
Monk Liu57406822017-10-25 16:37:02 +08003228
Monk Liu71182662017-12-25 15:14:58 +08003229 kthread_unpark(ring->sched.thread);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003230 }
3231
Harry Wentland45622362017-09-12 15:58:20 -04003232 if (amdgpu_device_has_dc_support(adev)) {
Monk Liu57406822017-10-25 16:37:02 +08003233 if (drm_atomic_helper_resume(adev->ddev, state))
3234			dev_info(adev->dev, "drm resume failed\n");
Monk Liu57406822017-10-25 16:37:02 +08003235 } else {
Harry Wentland45622362017-09-12 15:58:20 -04003236 drm_helper_resume_force_mode(adev->ddev);
Monk Liu57406822017-10-25 16:37:02 +08003237 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003238
3239 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
Monk Liu57406822017-10-25 16:37:02 +08003240
Gavin Wan89041942017-06-23 13:55:15 -04003241 if (r) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003242 /* bad news, how to tell it to userspace ? */
Monk Liu57406822017-10-25 16:37:02 +08003243 dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
3244 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
3245 } else {
3246		dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
Gavin Wan89041942017-06-23 13:55:15 -04003247 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003248
Gavin Wan89041942017-06-23 13:55:15 -04003249 amdgpu_vf_error_trans_all(adev);
Monk Liu13a752e2017-10-17 15:11:12 +08003250 adev->in_gpu_reset = 0;
3251 mutex_unlock(&adev->lock_reset);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003252 return r;
3253}
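/*
 * Illustrative sketch: the GPU scheduler's job-timeout handler is the
 * typical entry point into amdgpu_device_gpu_recover().  The handler name
 * and the container_of() plumbing are assumptions for illustration.
 */
#if 0
static void example_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	/* force=false: respect the amdgpu_gpu_recovery module parameter */
	amdgpu_device_gpu_recover(job->adev, job, false);
}
#endif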
3254
Alex Deuchere3ecdff2018-03-15 17:39:45 -05003255/**
3256 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
3257 *
3258 * @adev: amdgpu_device pointer
3259 *
3260 * Fetches and stores in the driver the PCIE capabilities (gen speed
3261 * and lanes) of the slot the device is in. Handles APUs and
3262 * virtualized environments where PCIE config space may not be available.
3263 */
Alex Deucher5494d862018-03-09 15:14:11 -05003264static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003265{
3266 u32 mask;
3267 int ret;
3268
Alex Deuchercd474ba2016-02-04 10:21:23 -05003269 if (amdgpu_pcie_gen_cap)
3270 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
3271
3272 if (amdgpu_pcie_lane_cap)
3273 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
3274
3275 /* covers APUs as well */
3276 if (pci_is_root_bus(adev->pdev->bus)) {
3277 if (adev->pm.pcie_gen_mask == 0)
3278 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3279 if (adev->pm.pcie_mlw_mask == 0)
3280 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003281 return;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003282 }
Alex Deuchercd474ba2016-02-04 10:21:23 -05003283
3284 if (adev->pm.pcie_gen_mask == 0) {
3285 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
3286 if (!ret) {
3287 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
3288 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
3289 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
3290
3291 if (mask & DRM_PCIE_SPEED_25)
3292 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
3293 if (mask & DRM_PCIE_SPEED_50)
3294 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
3295 if (mask & DRM_PCIE_SPEED_80)
3296 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
3297 } else {
3298 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3299 }
3300 }
3301 if (adev->pm.pcie_mlw_mask == 0) {
3302 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
3303 if (!ret) {
3304 switch (mask) {
3305 case 32:
3306 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
3307 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3308 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3309 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3310 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3311 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3312 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3313 break;
3314 case 16:
3315 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
3316 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3317 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3318 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3319 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3320 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3321 break;
3322 case 12:
3323 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
3324 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3325 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3326 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3327 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3328 break;
3329 case 8:
3330 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
3331 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3332 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3333 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3334 break;
3335 case 4:
3336 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
3337 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3338 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3339 break;
3340 case 2:
3341 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
3342 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
3343 break;
3344 case 1:
3345 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
3346 break;
3347 default:
3348 break;
3349 }
3350 } else {
3351 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05003352 }
3353 }
3354}
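/*
 * Illustrative sketch: consuming the cached masks.  A hypothetical helper
 * that reports the highest PCIE gen the slot supports, using the same
 * CAIL_* flags stored above.
 */
#if 0
static unsigned int example_max_pcie_gen(struct amdgpu_device *adev)
{
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		return 3;
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		return 2;
	return 1;
}
#endif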
Alex Deucherd38ceaf2015-04-20 16:55:21 -04003355