/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>

static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"LAST",
};

bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}

/*
 * MMIO register access helper functions.
 */
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect)
{
	uint32_t ret;

	if ((reg * 4) < adev->rmmio_size && !always_indirect)
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}

void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if ((reg * 4) < adev->rmmio_size && !always_indirect)
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
}

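/*
 * Illustrative sketch (not part of the driver): registers whose byte offset
 * (reg * 4) falls inside the mapped MMIO BAR are read/written directly, while
 * anything beyond it goes through the mmMM_INDEX/mmMM_DATA pair under
 * mmio_idx_lock.  Callers don't see the difference; the usual accessors
 * roughly expand to these helpers:
 *
 *	val = RREG32(reg);		// ~ amdgpu_mm_rreg(adev, reg, false)
 *	WREG32(reg, val | 0x1);		// ~ amdgpu_mm_wreg(adev, reg, val | 0x1, false)
 */
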
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}

void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function. Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function. Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     NULL, NULL, &adev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin(adev->vram_scratch.robj,
			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
		return r;
	}
	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
			   (void **)&adev->vram_scratch.ptr);
	if (r)
		amdgpu_bo_unpin(adev->vram_scratch.robj);
	amdgpu_bo_unreserve(adev->vram_scratch.robj);

	return r;
}

static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->vram_scratch.robj == NULL) {
		return;
	}
	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		amdgpu_bo_kunmap(adev->vram_scratch.robj);
		amdgpu_bo_unpin(adev->vram_scratch.robj);
		amdgpu_bo_unreserve(adev->vram_scratch.robj);
	}
	amdgpu_bo_unref(&adev->vram_scratch.robj);
}

/**
 * amdgpu_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}

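/*
 * Illustrative sketch (register names and values below are made up): a golden
 * register table is a flat array of {reg, and_mask, or_mask} triples.  An
 * and_mask of 0xffffffff means "overwrite the whole register with or_mask":
 *
 *	static const u32 example_golden_regs[] = {
 *		mmEXAMPLE_REG_A, 0x0000ff00, 0x00003200,  // clear bits 8-15, then OR in 0x3200
 *		mmEXAMPLE_REG_B, 0xffffffff, 0x12345678,  // full overwrite
 *	};
 *	amdgpu_program_register_sequence(adev, example_golden_regs,
 *					 (const u32)ARRAY_SIZE(example_golden_regs));
 */
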
void amdgpu_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK).
 * Returns 0 on success, error on failure.
 */
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base, adev->doorbell.num_doorbells * sizeof(u32));
	if (adev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)adev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)adev->doorbell.size);

	return 0;
}

/**
 * amdgpu_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK).
 */
static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

/*
 * amdgpu_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * amdgpu_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
			amdgpu_bo_kunmap(adev->wb.wb_obj);
			amdgpu_bo_unpin(adev->wb.wb_obj);
			amdgpu_bo_unreserve(adev->wb.wb_obj);
		}
		amdgpu_bo_unref(&adev->wb.wb_obj);
		adev->wb.wb = NULL;
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes Writeback and allocates the Writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->wb.wb_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			amdgpu_wb_fini(adev);
			return r;
		}
		r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
				  &adev->wb.gpu_addr);
		if (r) {
			amdgpu_bo_unreserve(adev->wb.wb_obj);
			dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
			amdgpu_wb_fini(adev);
			return r;
		}
		r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
		amdgpu_bo_unreserve(adev->wb.wb_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
			amdgpu_wb_fini(adev);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE);
	}

	return 0;
}

/**
 * amdgpu_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset;
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics).
 */
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}

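/*
 * Illustrative sketch (not part of the driver): a ring that wants the GPU to
 * report, e.g., a pointer or fence value via writeback allocates one 32-bit
 * slot, derives the CPU and GPU addresses from the shared WB buffer, and
 * frees the slot on teardown.  The offset math below is an assumption based
 * on the 4-byte slot size allocated by amdgpu_wb_init():
 *
 *	u32 wb;
 *	if (amdgpu_wb_get(adev, &wb) == 0) {
 *		u64 gpu_addr = adev->wb.gpu_addr + (wb * 4);	// handed to the GPU
 *		volatile u32 *cpu_ptr = &adev->wb.wb[wb];	// polled by the CPU
 *		...
 *		amdgpu_wb_free(adev, wb);
 *	}
 */
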
/**
 * amdgpu_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * This function will try to place VRAM at the base address provided
 * as a parameter (which is so far either the PCI aperture address or,
 * for IGPs, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than the aperture
 * size (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 and the related
 * Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
		dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * amdgpu_gtt_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

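/*
 * Worked example (assumed numbers, for illustration only): with VRAM placed
 * at [0, 4 GB) and a 40-bit mc_mask, size_bf (space before VRAM) is 0 and
 * size_af (space after VRAM) is roughly 1 TB - 4 GB, so the GTT window is
 * placed right after VRAM at the first gtt_base_align-aligned address at or
 * above 4 GB.  Only when VRAM sits near the top of the MC address space would
 * GTT be placed in front of it instead.
 */
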
/*
 * GPU helper functions.
 */
/**
 * amdgpu_card_posted - check if the hw has already been initialized
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool amdgpu_card_posted(struct amdgpu_device *adev)
{
	uint32_t reg;

	/* then check MEM_SIZE, in case the crtcs are off */
	reg = RREG32(mmCONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: in the whole-GPU pass-through virtualization case, old smc
		 * fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH),
		 * so amdgpu_card_posted() returns false and the driver would incorrectly
		 * skip vPost.  But if we force vPost in the pass-through case, the
		 * driver reload will hang.  So for FIJI, whether to do vPost depends on
		 * amdgpu_card_posted() only if the smc version is at least 00160e00.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver >= 0x00160e00)
				return !amdgpu_card_posted(adev);
		}
	} else {
		/* in the bare-metal case, amdgpu_card_posted() returns false
		 * after system reboot/boot, and true if the driver has been
		 * reloaded.
		 * we shouldn't do vPost after a driver reload, otherwise the
		 * GPU could hang.
		 */
		if (amdgpu_card_posted(adev))
			return false;
	}

	/* we assume vPost is needed for all other cases */
	return true;
}

/**
 * amdgpu_dummy_page_init - init dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_dummy_page_init(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page)
		return 0;
	adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (adev->dummy_page.page == NULL)
		return -ENOMEM;
	adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
		dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(adev->dummy_page.page);
		adev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * amdgpu_dummy_page_fini - free dummy page used by the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
{
	if (adev->dummy_page.page == NULL)
		return;
	pci_unmap_page(adev->pdev, adev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(adev->dummy_page.page);
	adev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = info->dev->dev_private;

	WREG32_IO(reg, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg);
	return r;
}

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (adev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	amdgpu_atombios_scratch_regs_init(adev);
	amdgpu_atom_allocate_fb_scratch(adev->mode_info.atom_context);
	return 0;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool amdgpu_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}

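/*
 * Worked example (for illustration only): 8 is 0b1000 and 7 is 0b0111, so
 * 8 & 7 == 0 and 8 passes; 12 is 0b1100 and 11 is 0b1011, so 12 & 11 == 0b1000
 * != 0 and 12 is rejected.  Note that 0 also satisfies the test; the callers
 * below guard against too-small values separately.
 */
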
/**
 * amdgpu_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1) {
		/* gtt size must be greater or equal to 32M */
		if (amdgpu_gart_size < 32) {
			dev_warn(adev->dev, "gart size (%d) too small\n",
				 amdgpu_gart_size);
			amdgpu_gart_size = -1;
		}
	}

	if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
		dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI is 40 bits.
	 */
	if (amdgpu_vm_size > 1024) {
		dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = 8;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(amdgpu_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (amdgpu_vm_size <= 8)
			amdgpu_vm_block_size = bits - 9;
		else
			amdgpu_vm_block_size = (bits + 3) / 2;

	} else if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}

	if (amdgpu_vm_block_size > 24 ||
	    (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
		dev_warn(adev->dev, "VM page table size (%d) too large\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = 9;
	}
}

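/*
 * Worked example (numbers only illustrate the math above): with an
 * amdgpu_vm_size of 8 (GB), the address space spans 8 GB / 4 KB = 2^21 pages,
 * so bits = ilog2(8) + 18 = 21 and vm_block_size = 21 - 9 = 12, i.e. a
 * 512-entry (4 KB) page directory whose entries each point at a page table
 * covering 2^12 pages (16 MB).  For a hypothetical vm_size of 64, bits = 24
 * and the split becomes (24 + 3) / 2 = 13 bits of page table with 11 bits of
 * page directory.
 */
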
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Checks whether the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
									    state);
			if (r)
				return r;
			break;
		}
	}
	return r;
}

int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
									    state);
			if (r)
				return r;
			break;
		}
	}
	return r;
}

int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_blocks[i].type == block_type)
			return adev->ip_blocks[i].funcs->is_idle((void *)adev);
	}
	return true;

}

const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * Returns 0 if the registered IP block of @type is version @major.@minor
 * or newer, and 1 if it is older or the ip_block doesn't exist.
 */
int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor)
{
	const struct amdgpu_ip_block_version *ip_block;
	ip_block = amdgpu_get_ip_block(adev, type);

	if (ip_block && ((ip_block->major > major) ||
			((ip_block->major == major) &&
			(ip_block->minor >= minor))))
		return 0;

	return 1;
}

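/*
 * Illustrative sketch (the block type and version are just examples): a caller
 * needing behaviour specific to, say, GFX IP v8.0 or newer would test
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 0) == 0)
 *		... // GFX block is at least 8.0
 */
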
static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
			if (!strcmp(pci_address_name, pciaddname)) {
				adev->enable_virtual_display = true;
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display);

		kfree(pciaddstr);
	}
}

static int amdgpu_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_whether_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	adev->ip_block_status = kcalloc(adev->num_ip_blocks,
					sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
	if (adev->ip_block_status == NULL)
		return -ENOMEM;

	if (adev->ip_blocks == NULL) {
		DRM_ERROR("No IP blocks found!\n");
		return r;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d\n", i);
			adev->ip_block_status[i].valid = false;
		} else {
			if (adev->ip_blocks[i].funcs->early_init) {
				r = adev->ip_blocks[i].funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_block_status[i].valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
					return r;
				} else {
					adev->ip_block_status[i].valid = true;
				}
			} else {
				adev->ip_block_status[i].valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}

static int amdgpu_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
			return r;
		}
		adev->ip_block_status[i].sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_block_status[i].hw = true;
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
			return r;
		}
		adev->ip_block_status[i].hw = true;
	}

	return 0;
}

static int amdgpu_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
		    adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
			continue;
		/* enable clockgating to save power */
		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
								    AMD_CG_STATE_GATE);
		if (r) {
			DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
			return r;
		}
		if (adev->ip_blocks[i].funcs->late_init) {
			r = adev->ip_blocks[i].funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
				return r;
			}
			adev->ip_block_status[i].late_initialized = true;
		}
	}

	return 0;
}

static int amdgpu_fini(struct amdgpu_device *adev)
{
	int i, r;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_block_status[i].hw)
			continue;
		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_wb_fini(adev);
			amdgpu_vram_scratch_fini(adev);
		}
		/* ungate blocks before hw fini so that we can shutdown the blocks safely */
		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
								    AMD_CG_STATE_UNGATE);
		if (r) {
			DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
			return r;
		}
		r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
		}
		adev->ip_block_status[i].hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_block_status[i].sw)
			continue;
		r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
		}
		adev->ip_block_status[i].sw = false;
		adev->ip_block_status[i].valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_block_status[i].late_initialized)
			continue;
		if (adev->ip_blocks[i].funcs->late_fini)
			adev->ip_blocks[i].funcs->late_fini((void *)adev);
		adev->ip_block_status[i].late_initialized = false;
	}

	return 0;
}

static int amdgpu_suspend(struct amdgpu_device *adev)
{
	int i, r;

	/* ungate SMC block first */
	r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
					 AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_block_status[i].valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down */
		if (i != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
									    AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
		}
	}

	return 0;
}

static int amdgpu_resume(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;
		r = adev->ip_blocks[i].funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
			return r;
		}
	}

	return 0;
}

static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_atombios_has_gpu_virtualization_table(adev))
		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
}

Alex Deucherd38ceaf2015-04-20 16:55:21 -04001539/**
1540 * amdgpu_device_init - initialize the driver
1541 *
1542 * @adev: amdgpu_device pointer
1543 * @pdev: drm dev pointer
1544 * @pdev: pci dev pointer
1545 * @flags: driver flags
1546 *
1547 * Initializes the driver info and hw (all asics).
1548 * Returns 0 for success or an error on failure.
1549 * Called at driver startup.
1550 */
1551int amdgpu_device_init(struct amdgpu_device *adev,
1552 struct drm_device *ddev,
1553 struct pci_dev *pdev,
1554 uint32_t flags)
1555{
1556 int r, i;
1557 bool runtime = false;
Marek Olšák95844d22016-08-17 23:49:27 +02001558 u32 max_MBps;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001559
1560 adev->shutdown = false;
1561 adev->dev = &pdev->dev;
1562 adev->ddev = ddev;
1563 adev->pdev = pdev;
1564 adev->flags = flags;
Jammy Zhou2f7d10b2015-07-22 11:29:01 +08001565 adev->asic_type = flags & AMD_ASIC_MASK;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001566 adev->is_atom_bios = false;
1567 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
1568 adev->mc.gtt_size = 512 * 1024 * 1024;
1569 adev->accel_working = false;
1570 adev->num_rings = 0;
1571 adev->mman.buffer_funcs = NULL;
1572 adev->mman.buffer_funcs_ring = NULL;
1573 adev->vm_manager.vm_pte_funcs = NULL;
Christian König2d55e452016-02-08 17:37:38 +01001574 adev->vm_manager.vm_pte_num_rings = 0;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001575 adev->gart.gart_funcs = NULL;
1576 adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
1577
1578 adev->smc_rreg = &amdgpu_invalid_rreg;
1579 adev->smc_wreg = &amdgpu_invalid_wreg;
1580 adev->pcie_rreg = &amdgpu_invalid_rreg;
1581 adev->pcie_wreg = &amdgpu_invalid_wreg;
Huang Rui36b9a952016-08-31 13:23:25 +08001582 adev->pciep_rreg = &amdgpu_invalid_rreg;
1583 adev->pciep_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001584 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
1585 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1586 adev->didt_rreg = &amdgpu_invalid_rreg;
1587 adev->didt_wreg = &amdgpu_invalid_wreg;
Rex Zhuccdbb202016-06-08 12:47:41 +08001588 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1589 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001590 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1591 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1592
Rex Zhuccdbb202016-06-08 12:47:41 +08001593
Alex Deucher3e39ab92015-06-05 15:04:33 -04001594 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1595 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1596 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001597
 1598 /* mutex initializations are all done here so we
 1599 * can call these functions without locking issues later */
Christian König8d0a7ce2015-11-03 20:58:50 +01001600 mutex_init(&adev->vm_manager.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001601 atomic_set(&adev->irq.ih.lock, 0);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001602 mutex_init(&adev->pm.mutex);
1603 mutex_init(&adev->gfx.gpu_clock_mutex);
1604 mutex_init(&adev->srbm_mutex);
1605 mutex_init(&adev->grbm_idx_mutex);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001606 mutex_init(&adev->mn_lock);
1607 hash_init(adev->mn_hash);
1608
1609 amdgpu_check_arguments(adev);
1610
1611 /* Registers mapping */
1612 /* TODO: block userspace mapping of io register */
1613 spin_lock_init(&adev->mmio_idx_lock);
1614 spin_lock_init(&adev->smc_idx_lock);
1615 spin_lock_init(&adev->pcie_idx_lock);
1616 spin_lock_init(&adev->uvd_ctx_idx_lock);
1617 spin_lock_init(&adev->didt_idx_lock);
Rex Zhuccdbb202016-06-08 12:47:41 +08001618 spin_lock_init(&adev->gc_cac_idx_lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001619 spin_lock_init(&adev->audio_endpt_idx_lock);
Marek Olšák95844d22016-08-17 23:49:27 +02001620 spin_lock_init(&adev->mm_stats.lock);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001621
Chunming Zhou0c4e7fa2016-08-17 11:41:30 +08001622 INIT_LIST_HEAD(&adev->shadow_list);
1623 mutex_init(&adev->shadow_list_lock);
1624
Chunming Zhou5c1354b2016-08-30 16:13:10 +08001625 INIT_LIST_HEAD(&adev->gtt_list);
1626 spin_lock_init(&adev->gtt_list_lock);
1627
Ken Wangda69c1612016-01-21 19:08:55 +08001628 if (adev->asic_type >= CHIP_BONAIRE) {
1629 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
1630 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
1631 } else {
1632 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
1633 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
1634 }
Chunming Zhou5c1354b2016-08-30 16:13:10 +08001635
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001636 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
1637 if (adev->rmmio == NULL) {
1638 return -ENOMEM;
1639 }
1640 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
1641 DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
1642
Ken Wangda69c1612016-01-21 19:08:55 +08001643 if (adev->asic_type >= CHIP_BONAIRE)
1644 /* doorbell bar mapping */
1645 amdgpu_doorbell_init(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001646
1647 /* io port mapping */
1648 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1649 if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
1650 adev->rio_mem_size = pci_resource_len(adev->pdev, i);
1651 adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
1652 break;
1653 }
1654 }
1655 if (adev->rio_mem == NULL)
1656 DRM_ERROR("Unable to find PCI I/O BAR\n");
1657
1658 /* early init functions */
1659 r = amdgpu_early_init(adev);
1660 if (r)
1661 return r;
1662
1663 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
1664 /* this will fail for cards that aren't VGA class devices, just
1665 * ignore it */
1666 vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
1667
1668 if (amdgpu_runtime_pm == 1)
1669 runtime = true;
Alex Deuchere9bef452016-04-25 13:12:18 -04001670 if (amdgpu_device_is_px(ddev))
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001671 runtime = true;
1672 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1673 if (runtime)
1674 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1675
1676 /* Read BIOS */
Alex Deucher83ba1262016-06-03 18:21:41 -04001677 if (!amdgpu_get_bios(adev)) {
1678 r = -EINVAL;
1679 goto failed;
1680 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001681 /* Must be an ATOMBIOS */
1682 if (!adev->is_atom_bios) {
1683 dev_err(adev->dev, "Expecting atombios for GPU\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001684 r = -EINVAL;
1685 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001686 }
1687 r = amdgpu_atombios_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001688 if (r) {
1689 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001690 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001691 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001692
Monk Liu4e99a442016-03-31 13:26:59 +08001693 /* detect if we are running with an SR-IOV vBIOS */
1694 amdgpu_device_detect_sriov_bios(adev);
Andres Rodriguez048765a2016-06-11 02:51:32 -04001695
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001696 /* Post card if necessary */
Monk Liubec86372016-09-14 19:38:08 +08001697 if (amdgpu_vpost_needed(adev)) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001698 if (!adev->bios) {
Monk Liubec86372016-09-14 19:38:08 +08001699 dev_err(adev->dev, "no vBIOS found\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001700 r = -EINVAL;
1701 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001702 }
Monk Liubec86372016-09-14 19:38:08 +08001703 DRM_INFO("GPU posting now...\n");
Monk Liu4e99a442016-03-31 13:26:59 +08001704 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1705 if (r) {
1706 dev_err(adev->dev, "gpu post error!\n");
1707 goto failed;
1708 }
1709 } else {
1710 DRM_INFO("GPU post is not needed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001711 }
1712
1713 /* Initialize clocks */
1714 r = amdgpu_atombios_get_clock_info(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001715 if (r) {
1716 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001717 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001718 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001719 /* init i2c buses */
1720 amdgpu_atombios_i2c_init(adev);
1721
1722 /* Fence driver */
1723 r = amdgpu_fence_driver_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001724 if (r) {
1725 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001726 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001727 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001728
1729 /* init the mode config */
1730 drm_mode_config_init(adev->ddev);
1731
1732 r = amdgpu_init(adev);
1733 if (r) {
Alex Deucher2c1a2782015-12-07 17:02:53 -05001734 dev_err(adev->dev, "amdgpu_init failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001735 amdgpu_fini(adev);
Alex Deucher83ba1262016-06-03 18:21:41 -04001736 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001737 }
1738
1739 adev->accel_working = true;
1740
Marek Olšák95844d22016-08-17 23:49:27 +02001741 /* Initialize the buffer migration limit. */
1742 if (amdgpu_moverate >= 0)
1743 max_MBps = amdgpu_moverate;
1744 else
1745 max_MBps = 8; /* Allow 8 MB/s. */
1746 /* Get a log2 for easy divisions. */
1747 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
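	/* Illustrative note: the default of 8 MB/s gives log2_max_MBps = 3
	 * (ilog2(8) == 3), so later buffer-move accounting can use shifts
	 * instead of divisions. */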
1748
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001749 amdgpu_fbdev_init(adev);
1750
1751 r = amdgpu_ib_pool_init(adev);
1752 if (r) {
1753 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
Alex Deucher83ba1262016-06-03 18:21:41 -04001754 goto failed;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001755 }
1756
1757 r = amdgpu_ib_ring_tests(adev);
1758 if (r)
1759 DRM_ERROR("ib ring test failed (%d).\n", r);
1760
1761 r = amdgpu_gem_debugfs_init(adev);
1762 if (r) {
1763 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1764 }
1765
1766 r = amdgpu_debugfs_regs_init(adev);
1767 if (r) {
1768 DRM_ERROR("registering register debugfs failed (%d).\n", r);
1769 }
1770
Huang Rui50ab2532016-06-12 15:51:09 +08001771 r = amdgpu_debugfs_firmware_init(adev);
1772 if (r) {
1773 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
1774 return r;
1775 }
1776
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001777 if ((amdgpu_testing & 1)) {
1778 if (adev->accel_working)
1779 amdgpu_test_moves(adev);
1780 else
1781 DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
1782 }
1783 if ((amdgpu_testing & 2)) {
1784 if (adev->accel_working)
1785 amdgpu_test_syncing(adev);
1786 else
1787 DRM_INFO("amdgpu: acceleration disabled, skipping sync tests\n");
1788 }
1789 if (amdgpu_benchmarking) {
1790 if (adev->accel_working)
1791 amdgpu_benchmark(adev, amdgpu_benchmarking);
1792 else
1793 DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
1794 }
1795
1796 /* enable clockgating, etc. after ib tests, etc. since some blocks require
1797 * explicit gating rather than handling it automatically.
1798 */
1799 r = amdgpu_late_init(adev);
Alex Deucher2c1a2782015-12-07 17:02:53 -05001800 if (r) {
1801 dev_err(adev->dev, "amdgpu_late_init failed\n");
Alex Deucher83ba1262016-06-03 18:21:41 -04001802 goto failed;
Alex Deucher2c1a2782015-12-07 17:02:53 -05001803 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001804
1805 return 0;
Alex Deucher83ba1262016-06-03 18:21:41 -04001806
1807failed:
1808 if (runtime)
1809 vga_switcheroo_fini_domain_pm_ops(adev->dev);
1810 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001811}
1812
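/*
 * Illustrative sketch, not compiled: roughly how a KMS load path would pair
 * amdgpu_device_init() with amdgpu_device_fini(). The function name, the
 * allocation and the error handling below are assumptions for illustration
 * only; the real caller lives in the KMS layer.
 */
#if 0
static int example_driver_load(struct drm_device *ddev, uint32_t flags)
{
	struct amdgpu_device *adev;
	int r;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	ddev->dev_private = adev;

	/* flags carry the ASIC type plus AMD_IS_PX/AMD_IS_APU bits */
	r = amdgpu_device_init(adev, ddev, ddev->pdev, flags);
	if (r) {
		ddev->dev_private = NULL;
		kfree(adev);
		return r;
	}
	return 0;
}
#endif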
1813static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
1814
1815/**
1816 * amdgpu_device_fini - tear down the driver
1817 *
1818 * @adev: amdgpu_device pointer
1819 *
1820 * Tear down the driver info (all asics).
1821 * Called at driver shutdown.
1822 */
1823void amdgpu_device_fini(struct amdgpu_device *adev)
1824{
1825 int r;
1826
1827 DRM_INFO("amdgpu: finishing device.\n");
1828 adev->shutdown = true;
1829 /* evict vram memory */
1830 amdgpu_bo_evict_vram(adev);
1831 amdgpu_ib_pool_fini(adev);
1832 amdgpu_fence_driver_fini(adev);
Lukas Wunner84b89bd2016-06-08 18:47:27 +02001833 drm_crtc_force_disable_all(adev->ddev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001834 amdgpu_fbdev_fini(adev);
1835 r = amdgpu_fini(adev);
Alex Deucher8faf0e02015-07-28 11:50:31 -04001836 kfree(adev->ip_block_status);
1837 adev->ip_block_status = NULL;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001838 adev->accel_working = false;
1839 /* free i2c buses */
1840 amdgpu_i2c_fini(adev);
1841 amdgpu_atombios_fini(adev);
1842 kfree(adev->bios);
1843 adev->bios = NULL;
1844 vga_switcheroo_unregister_client(adev->pdev);
Alex Deucher83ba1262016-06-03 18:21:41 -04001845 if (adev->flags & AMD_IS_PX)
1846 vga_switcheroo_fini_domain_pm_ops(adev->dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001847 vga_client_register(adev->pdev, NULL, NULL, NULL);
1848 if (adev->rio_mem)
1849 pci_iounmap(adev->pdev, adev->rio_mem);
1850 adev->rio_mem = NULL;
1851 iounmap(adev->rmmio);
1852 adev->rmmio = NULL;
Ken Wangda69c1612016-01-21 19:08:55 +08001853 if (adev->asic_type >= CHIP_BONAIRE)
1854 amdgpu_doorbell_fini(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001855 amdgpu_debugfs_regs_cleanup(adev);
1856 amdgpu_debugfs_remove_files(adev);
1857}
1858
1859
1860/*
1861 * Suspend & resume.
1862 */
1863/**
Alex Deucher810ddc32016-08-23 13:25:49 -04001864 * amdgpu_device_suspend - initiate device suspend
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001865 *
 1866 * @dev: drm dev pointer
 1867 * @suspend: whether to put the PCI device into a low power (D3hot) state
1868 *
1869 * Puts the hw in the suspend state (all asics).
1870 * Returns 0 for success or an error on failure.
1871 * Called at driver suspend.
1872 */
Alex Deucher810ddc32016-08-23 13:25:49 -04001873int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001874{
1875 struct amdgpu_device *adev;
1876 struct drm_crtc *crtc;
1877 struct drm_connector *connector;
Alex Deucher5ceb54c2015-08-05 12:41:48 -04001878 int r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001879
1880 if (dev == NULL || dev->dev_private == NULL) {
1881 return -ENODEV;
1882 }
1883
1884 adev = dev->dev_private;
1885
Alex Deuchere313de72016-09-19 12:17:22 -04001886 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001887 return 0;
1888
1889 drm_kms_helper_poll_disable(dev);
1890
1891 /* turn off display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04001892 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001893 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1894 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1895 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04001896 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001897
Alex Deucher756e6882015-10-08 00:03:36 -04001898 /* unpin the front buffers and cursors */
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001899 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
Alex Deucher756e6882015-10-08 00:03:36 -04001900 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001901 struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
1902 struct amdgpu_bo *robj;
1903
Alex Deucher756e6882015-10-08 00:03:36 -04001904 if (amdgpu_crtc->cursor_bo) {
1905 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1906 r = amdgpu_bo_reserve(aobj, false);
1907 if (r == 0) {
1908 amdgpu_bo_unpin(aobj);
1909 amdgpu_bo_unreserve(aobj);
1910 }
1911 }
1912
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001913 if (rfb == NULL || rfb->obj == NULL) {
1914 continue;
1915 }
1916 robj = gem_to_amdgpu_bo(rfb->obj);
1917 /* don't unpin kernel fb objects */
1918 if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
1919 r = amdgpu_bo_reserve(robj, false);
1920 if (r == 0) {
1921 amdgpu_bo_unpin(robj);
1922 amdgpu_bo_unreserve(robj);
1923 }
1924 }
1925 }
1926 /* evict vram memory */
1927 amdgpu_bo_evict_vram(adev);
1928
Alex Deucher5ceb54c2015-08-05 12:41:48 -04001929 amdgpu_fence_driver_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001930
1931 r = amdgpu_suspend(adev);
1932
1933 /* evict remaining vram memory */
1934 amdgpu_bo_evict_vram(adev);
1935
1936 pci_save_state(dev->pdev);
1937 if (suspend) {
1938 /* Shut down the device */
1939 pci_disable_device(dev->pdev);
1940 pci_set_power_state(dev->pdev, PCI_D3hot);
jimqu74b0b152016-09-07 17:09:12 +08001941 } else {
1942 r = amdgpu_asic_reset(adev);
1943 if (r)
1944 DRM_ERROR("amdgpu asic reset failed\n");
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001945 }
1946
1947 if (fbcon) {
1948 console_lock();
1949 amdgpu_fbdev_set_suspend(adev, 1);
1950 console_unlock();
1951 }
1952 return 0;
1953}
1954
1955/**
Alex Deucher810ddc32016-08-23 13:25:49 -04001956 * amdgpu_device_resume - initiate device resume
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001957 *
 1958 * @dev: drm dev pointer
1959 *
1960 * Bring the hw back to operating state (all asics).
1961 * Returns 0 for success or an error on failure.
1962 * Called at driver resume.
1963 */
Alex Deucher810ddc32016-08-23 13:25:49 -04001964int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001965{
1966 struct drm_connector *connector;
1967 struct amdgpu_device *adev = dev->dev_private;
Alex Deucher756e6882015-10-08 00:03:36 -04001968 struct drm_crtc *crtc;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001969 int r;
1970
Alex Deuchere313de72016-09-19 12:17:22 -04001971 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001972 return 0;
1973
jimqu74b0b152016-09-07 17:09:12 +08001974 if (fbcon)
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001975 console_lock();
jimqu74b0b152016-09-07 17:09:12 +08001976
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001977 if (resume) {
1978 pci_set_power_state(dev->pdev, PCI_D0);
1979 pci_restore_state(dev->pdev);
jimqu74b0b152016-09-07 17:09:12 +08001980 r = pci_enable_device(dev->pdev);
1981 if (r) {
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001982 if (fbcon)
1983 console_unlock();
jimqu74b0b152016-09-07 17:09:12 +08001984 return r;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001985 }
1986 }
1987
1988 /* post card */
jimqu74b0b152016-09-07 17:09:12 +08001989 if (!amdgpu_card_posted(adev) || !resume) {
1990 r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
1991 if (r)
1992 DRM_ERROR("amdgpu asic init failed\n");
1993 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001994
1995 r = amdgpu_resume(adev);
Flora Cuica198522016-02-04 15:10:08 +08001996 if (r)
1997 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04001998
Alex Deucher5ceb54c2015-08-05 12:41:48 -04001999 amdgpu_fence_driver_resume(adev);
2000
Flora Cuica198522016-02-04 15:10:08 +08002001 if (resume) {
2002 r = amdgpu_ib_ring_tests(adev);
2003 if (r)
2004 DRM_ERROR("ib ring test failed (%d).\n", r);
2005 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002006
2007 r = amdgpu_late_init(adev);
2008 if (r)
2009 return r;
2010
Alex Deucher756e6882015-10-08 00:03:36 -04002011 /* pin cursors */
2012 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2013 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2014
2015 if (amdgpu_crtc->cursor_bo) {
2016 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2017 r = amdgpu_bo_reserve(aobj, false);
2018 if (r == 0) {
2019 r = amdgpu_bo_pin(aobj,
2020 AMDGPU_GEM_DOMAIN_VRAM,
2021 &amdgpu_crtc->cursor_addr);
2022 if (r != 0)
2023 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
2024 amdgpu_bo_unreserve(aobj);
2025 }
2026 }
2027 }
2028
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002029 /* blat the mode back in */
2030 if (fbcon) {
2031 drm_helper_resume_force_mode(dev);
2032 /* turn on display hw */
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002033 drm_modeset_lock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002034 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2035 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
2036 }
Alex Deucher4c7fbc32015-09-23 14:32:06 -04002037 drm_modeset_unlock_all(dev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002038 }
2039
2040 drm_kms_helper_poll_enable(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002041
2042 /*
2043 * Most of the connector probing functions try to acquire runtime pm
2044 * refs to ensure that the GPU is powered on when connector polling is
2045 * performed. Since we're calling this from a runtime PM callback,
2046 * trying to acquire rpm refs will cause us to deadlock.
2047 *
2048 * Since we're guaranteed to be holding the rpm lock, it's safe to
2049 * temporarily disable the rpm helpers so this doesn't deadlock us.
2050 */
2051#ifdef CONFIG_PM
2052 dev->dev->power.disable_depth++;
2053#endif
Alex Deucher54fb2a52015-11-24 14:30:56 -05002054 drm_helper_hpd_irq_event(dev);
Lyude23a1a9e2016-07-18 11:41:37 -04002055#ifdef CONFIG_PM
2056 dev->dev->power.disable_depth--;
2057#endif
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002058
2059 if (fbcon) {
2060 amdgpu_fbdev_set_suspend(adev, 0);
2061 console_unlock();
2062 }
2063
2064 return 0;
2065}
2066
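/*
 * Illustrative sketch, not compiled: how a system sleep callback pair might
 * drive amdgpu_device_suspend()/amdgpu_device_resume(). The hook names are
 * made up; "true, true" requests the full PCI power transition and fbdev
 * console handling.
 */
#if 0
static int example_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return amdgpu_device_suspend(drm_dev, true, true);
}

static int example_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return amdgpu_device_resume(drm_dev, true, true);
}
#endif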
Chunming Zhou63fbf422016-07-15 11:19:20 +08002067static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
2068{
2069 int i;
2070 bool asic_hang = false;
2071
2072 for (i = 0; i < adev->num_ip_blocks; i++) {
2073 if (!adev->ip_block_status[i].valid)
2074 continue;
2075 if (adev->ip_blocks[i].funcs->check_soft_reset)
2076 adev->ip_blocks[i].funcs->check_soft_reset(adev);
2077 if (adev->ip_block_status[i].hang) {
 2078 DRM_INFO("IP block:%d is hung!\n", i);
2079 asic_hang = true;
2080 }
2081 }
2082 return asic_hang;
2083}
2084
Baoyou Xie4d446652016-09-18 22:09:35 +08002085static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
Chunming Zhoud31a5012016-07-18 10:04:34 +08002086{
2087 int i, r = 0;
2088
2089 for (i = 0; i < adev->num_ip_blocks; i++) {
2090 if (!adev->ip_block_status[i].valid)
2091 continue;
Chunming Zhou35d782f2016-07-15 15:57:13 +08002092 if (adev->ip_block_status[i].hang &&
2093 adev->ip_blocks[i].funcs->pre_soft_reset) {
Chunming Zhoud31a5012016-07-18 10:04:34 +08002094 r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
2095 if (r)
2096 return r;
2097 }
2098 }
2099
2100 return 0;
2101}
2102
Chunming Zhou35d782f2016-07-15 15:57:13 +08002103static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
2104{
2105 if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
Chunming Zhou35d782f2016-07-15 15:57:13 +08002106 adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
Chunming Zhou35d782f2016-07-15 15:57:13 +08002107 adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
2108 adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
 2109 DRM_INFO("Some blocks need a full reset!\n");
2110 return true;
2111 }
2112 return false;
2113}
2114
2115static int amdgpu_soft_reset(struct amdgpu_device *adev)
2116{
2117 int i, r = 0;
2118
2119 for (i = 0; i < adev->num_ip_blocks; i++) {
2120 if (!adev->ip_block_status[i].valid)
2121 continue;
2122 if (adev->ip_block_status[i].hang &&
2123 adev->ip_blocks[i].funcs->soft_reset) {
2124 r = adev->ip_blocks[i].funcs->soft_reset(adev);
2125 if (r)
2126 return r;
2127 }
2128 }
2129
2130 return 0;
2131}
2132
2133static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
2134{
2135 int i, r = 0;
2136
2137 for (i = 0; i < adev->num_ip_blocks; i++) {
2138 if (!adev->ip_block_status[i].valid)
2139 continue;
2140 if (adev->ip_block_status[i].hang &&
2141 adev->ip_blocks[i].funcs->post_soft_reset)
2142 r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
2143 if (r)
2144 return r;
2145 }
2146
2147 return 0;
2148}
2149
Chunming Zhou3ad81f12016-08-05 17:30:17 +08002150bool amdgpu_need_backup(struct amdgpu_device *adev)
2151{
2152 if (adev->flags & AMD_IS_APU)
2153 return false;
2154
2155 return amdgpu_lockup_timeout > 0 ? true : false;
2156}
2157
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002158static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
2159 struct amdgpu_ring *ring,
2160 struct amdgpu_bo *bo,
2161 struct fence **fence)
2162{
2163 uint32_t domain;
2164 int r;
2165
2166 if (!bo->shadow)
2167 return 0;
2168
2169 r = amdgpu_bo_reserve(bo, false);
2170 if (r)
2171 return r;
2172 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
2173 /* if bo has been evicted, then no need to recover */
2174 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
2175 r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
2176 NULL, fence, true);
2177 if (r) {
 2178 DRM_ERROR("page table recovery from shadow failed!\n");
2179 goto err;
2180 }
2181 }
2182err:
2183 amdgpu_bo_unreserve(bo);
2184 return r;
2185}
2186
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002187/**
2188 * amdgpu_gpu_reset - reset the asic
2189 *
2190 * @adev: amdgpu device pointer
2191 *
 2192 * Attempt to reset the GPU if it has hung (all asics).
2193 * Returns 0 for success or an error on failure.
2194 */
2195int amdgpu_gpu_reset(struct amdgpu_device *adev)
2196{
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002197 int i, r;
2198 int resched;
Chunming Zhou35d782f2016-07-15 15:57:13 +08002199 bool need_full_reset;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002200
Chunming Zhou63fbf422016-07-15 11:19:20 +08002201 if (!amdgpu_check_soft_reset(adev)) {
2202 DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
2203 return 0;
2204 }
2205
Marek Olšákd94aed52015-05-05 21:13:49 +02002206 atomic_inc(&adev->gpu_reset_counter);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002207
Chunming Zhoua3c47d62016-06-30 16:44:41 +08002208 /* block TTM */
2209 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
2210
Chunming Zhou0875dc92016-06-12 15:41:58 +08002211 /* block scheduler */
2212 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2213 struct amdgpu_ring *ring = adev->rings[i];
2214
2215 if (!ring)
2216 continue;
2217 kthread_park(ring->sched.thread);
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002218 amd_sched_hw_job_reset(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002219 }
Chunming Zhou2200eda2016-06-30 16:53:02 +08002220 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
2221 amdgpu_fence_driver_force_completion(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002222
Chunming Zhou35d782f2016-07-15 15:57:13 +08002223 need_full_reset = amdgpu_need_full_reset(adev);
2224
2225 if (!need_full_reset) {
2226 amdgpu_pre_soft_reset(adev);
2227 r = amdgpu_soft_reset(adev);
2228 amdgpu_post_soft_reset(adev);
2229 if (r || amdgpu_check_soft_reset(adev)) {
 2230 DRM_INFO("soft reset failed, will fall back to full reset!\n");
2231 need_full_reset = true;
2232 }
2233 }
2234
2235 if (need_full_reset) {
2236 /* save scratch */
2237 amdgpu_atombios_scratch_regs_save(adev);
2238 r = amdgpu_suspend(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002239
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002240retry:
Chunming Zhou35d782f2016-07-15 15:57:13 +08002241 /* Disable fb access */
2242 if (adev->mode_info.num_crtc) {
2243 struct amdgpu_mode_mc_save save;
2244 amdgpu_display_stop_mc_access(adev, &save);
2245 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
2246 }
Chunming Zhouf1aa7e02016-06-28 10:38:50 +08002247
Chunming Zhou35d782f2016-07-15 15:57:13 +08002248 r = amdgpu_asic_reset(adev);
2249 /* post card */
2250 amdgpu_atom_asic_init(adev->mode_info.atom_context);
Alex Deucherbfa99262016-01-15 11:59:48 -05002251
Chunming Zhou35d782f2016-07-15 15:57:13 +08002252 if (!r) {
2253 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
2254 r = amdgpu_resume(adev);
2255 }
2256 /* restore scratch */
2257 amdgpu_atombios_scratch_regs_restore(adev);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002258 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002259 if (!r) {
Chunming Zhoue72cfd52016-07-27 13:15:20 +08002260 amdgpu_irq_gpu_reset_resume_helper(adev);
Chunming Zhou2c0d7312016-08-30 16:36:25 +08002261 if (need_full_reset && amdgpu_need_backup(adev)) {
2262 r = amdgpu_ttm_recover_gart(adev);
2263 if (r)
2264 DRM_ERROR("gart recovery failed!!!\n");
2265 }
Chunming Zhou1f465082016-06-30 15:02:26 +08002266 r = amdgpu_ib_ring_tests(adev);
2267 if (r) {
2268 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
Chunming Zhou40019dc2016-06-29 16:01:49 +08002269 r = amdgpu_suspend(adev);
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002270 need_full_reset = true;
Chunming Zhou40019dc2016-06-29 16:01:49 +08002271 goto retry;
Chunming Zhou1f465082016-06-30 15:02:26 +08002272 }
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002273 /*
 2274 * recover vm page tables, since we cannot rely on VRAM being
 2275 * consistent after a full gpu reset.
2276 */
2277 if (need_full_reset && amdgpu_need_backup(adev)) {
2278 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
2279 struct amdgpu_bo *bo, *tmp;
2280 struct fence *fence = NULL, *next = NULL;
Chunming Zhou1f465082016-06-30 15:02:26 +08002281
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002282 DRM_INFO("recover vram bo from shadow\n");
2283 mutex_lock(&adev->shadow_list_lock);
2284 list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
2285 amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
2286 if (fence) {
2287 r = fence_wait(fence, false);
2288 if (r) {
 2289 WARN(r, "recovery from shadow isn't completed\n");
2290 break;
2291 }
2292 }
2293
2294 fence_put(fence);
2295 fence = next;
2296 }
2297 mutex_unlock(&adev->shadow_list_lock);
2298 if (fence) {
2299 r = fence_wait(fence, false);
2300 if (r)
 2301 WARN(r, "recovery from shadow isn't completed\n");
2302 }
2303 fence_put(fence);
2304 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002305 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2306 struct amdgpu_ring *ring = adev->rings[i];
2307 if (!ring)
2308 continue;
Chunming Zhou53cdccd2016-07-21 17:20:52 +08002309
Chunming Zhouaa1c8902016-06-30 13:56:02 +08002310 amd_sched_job_recovery(&ring->sched);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002311 kthread_unpark(ring->sched.thread);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002312 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002313 } else {
Chunming Zhou2200eda2016-06-30 16:53:02 +08002314 dev_err(adev->dev, "asic resume failed (%d).\n", r);
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002315 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
Chunming Zhou0875dc92016-06-12 15:41:58 +08002316 if (adev->rings[i]) {
2317 kthread_unpark(adev->rings[i]->sched.thread);
Chunming Zhou0875dc92016-06-12 15:41:58 +08002318 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002319 }
2320 }
2321
2322 drm_helper_resume_force_mode(adev->ddev);
2323
2324 ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
2325 if (r) {
2326 /* bad news, how to tell it to userspace ? */
2327 dev_info(adev->dev, "GPU reset failed\n");
2328 }
2329
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002330 return r;
2331}
2332
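/*
 * Illustrative sketch, not compiled: a hang-handling path (e.g. a job
 * timeout handler, an assumption here) would simply call amdgpu_gpu_reset()
 * and let it pick between a soft reset of the hung IP blocks and a full
 * ASIC reset with VRAM recovery from shadow BOs.
 */
#if 0
static void example_handle_hang(struct amdgpu_device *adev)
{
	int r = amdgpu_gpu_reset(adev);

	if (r)
		DRM_ERROR("GPU reset failed (%d), device is likely lost\n", r);
}
#endif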
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002333void amdgpu_get_pcie_info(struct amdgpu_device *adev)
2334{
2335 u32 mask;
2336 int ret;
2337
Alex Deuchercd474ba2016-02-04 10:21:23 -05002338 if (amdgpu_pcie_gen_cap)
2339 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
2340
2341 if (amdgpu_pcie_lane_cap)
2342 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
2343
2344 /* covers APUs as well */
2345 if (pci_is_root_bus(adev->pdev->bus)) {
2346 if (adev->pm.pcie_gen_mask == 0)
2347 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2348 if (adev->pm.pcie_mlw_mask == 0)
2349 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002350 return;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002351 }
Alex Deuchercd474ba2016-02-04 10:21:23 -05002352
2353 if (adev->pm.pcie_gen_mask == 0) {
2354 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
2355 if (!ret) {
2356 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
2357 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
2358 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
2359
2360 if (mask & DRM_PCIE_SPEED_25)
2361 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
2362 if (mask & DRM_PCIE_SPEED_50)
2363 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
2364 if (mask & DRM_PCIE_SPEED_80)
2365 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
2366 } else {
2367 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
2368 }
2369 }
2370 if (adev->pm.pcie_mlw_mask == 0) {
2371 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
2372 if (!ret) {
2373 switch (mask) {
2374 case 32:
2375 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
2376 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2377 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2378 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2379 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2380 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2381 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2382 break;
2383 case 16:
2384 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
2385 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2386 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2387 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2388 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2389 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2390 break;
2391 case 12:
2392 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
2393 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2394 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2395 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2396 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2397 break;
2398 case 8:
2399 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
2400 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2401 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2402 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2403 break;
2404 case 4:
2405 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2406 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2407 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2408 break;
2409 case 2:
2410 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2411 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2412 break;
2413 case 1:
2414 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2415 break;
2416 default:
2417 break;
2418 }
2419 } else {
2420 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
Alex Deucherd0dd7f02015-11-11 19:45:06 -05002421 }
2422 }
2423}
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002424
2425/*
2426 * Debugfs
2427 */
2428int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
Nils Wallménius06ab6832016-05-02 12:46:15 -04002429 const struct drm_info_list *files,
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002430 unsigned nfiles)
2431{
2432 unsigned i;
2433
2434 for (i = 0; i < adev->debugfs_count; i++) {
2435 if (adev->debugfs[i].files == files) {
2436 /* Already registered */
2437 return 0;
2438 }
2439 }
2440
2441 i = adev->debugfs_count + 1;
2442 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
2443 DRM_ERROR("Reached maximum number of debugfs components.\n");
 2444 DRM_ERROR("Report this so we can increase "
2445 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
2446 return -EINVAL;
2447 }
2448 adev->debugfs[adev->debugfs_count].files = files;
2449 adev->debugfs[adev->debugfs_count].num_files = nfiles;
2450 adev->debugfs_count = i;
2451#if defined(CONFIG_DEBUG_FS)
2452 drm_debugfs_create_files(files, nfiles,
2453 adev->ddev->control->debugfs_root,
2454 adev->ddev->control);
2455 drm_debugfs_create_files(files, nfiles,
2456 adev->ddev->primary->debugfs_root,
2457 adev->ddev->primary);
2458#endif
2459 return 0;
2460}
2461
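/*
 * Illustrative sketch, not compiled: how a component could register a
 * debugfs entry through amdgpu_debugfs_add_files(). The file name and show
 * callback are made up for the example.
 */
#if 0
static int example_debugfs_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu_reset_counter: %d\n",
		   atomic_read(&adev->gpu_reset_counter));
	return 0;
}

static const struct drm_info_list example_debugfs_list[] = {
	{ "amdgpu_example_info", example_debugfs_show, 0, NULL },
};

/* typically called from a component's init path:
 * amdgpu_debugfs_add_files(adev, example_debugfs_list,
 *			    ARRAY_SIZE(example_debugfs_list));
 */
#endif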
2462static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
2463{
2464#if defined(CONFIG_DEBUG_FS)
2465 unsigned i;
2466
2467 for (i = 0; i < adev->debugfs_count; i++) {
2468 drm_debugfs_remove_files(adev->debugfs[i].files,
2469 adev->debugfs[i].num_files,
2470 adev->ddev->control);
2471 drm_debugfs_remove_files(adev->debugfs[i].files,
2472 adev->debugfs[i].num_files,
2473 adev->ddev->primary);
2474 }
2475#endif
2476}
2477
2478#if defined(CONFIG_DEBUG_FS)
2479
2480static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2481 size_t size, loff_t *pos)
2482{
2483 struct amdgpu_device *adev = f->f_inode->i_private;
2484 ssize_t result = 0;
2485 int r;
Tom St Denisbd122672016-07-28 09:39:22 -04002486 bool pm_pg_lock, use_bank;
Tom St Denis566281592016-06-27 11:55:07 -04002487 unsigned instance_bank, sh_bank, se_bank;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002488
2489 if (size & 0x3 || *pos & 0x3)
2490 return -EINVAL;
2491
Tom St Denisbd122672016-07-28 09:39:22 -04002492 /* are we reading registers for which a PG lock is necessary? */
2493 pm_pg_lock = (*pos >> 23) & 1;
2494
Tom St Denis566281592016-06-27 11:55:07 -04002495 if (*pos & (1ULL << 62)) {
2496 se_bank = (*pos >> 24) & 0x3FF;
2497 sh_bank = (*pos >> 34) & 0x3FF;
2498 instance_bank = (*pos >> 44) & 0x3FF;
2499 use_bank = 1;
Tom St Denis566281592016-06-27 11:55:07 -04002500 } else {
2501 use_bank = 0;
2502 }
2503
Tom St Denisbd122672016-07-28 09:39:22 -04002504 *pos &= 0x3FFFF;
2505
Tom St Denis566281592016-06-27 11:55:07 -04002506 if (use_bank) {
2507 if (sh_bank >= adev->gfx.config.max_sh_per_se ||
2508 se_bank >= adev->gfx.config.max_shader_engines)
2509 return -EINVAL;
2510 mutex_lock(&adev->grbm_idx_mutex);
2511 amdgpu_gfx_select_se_sh(adev, se_bank,
2512 sh_bank, instance_bank);
2513 }
2514
Tom St Denisbd122672016-07-28 09:39:22 -04002515 if (pm_pg_lock)
2516 mutex_lock(&adev->pm.mutex);
2517
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002518 while (size) {
2519 uint32_t value;
2520
2521 if (*pos > adev->rmmio_size)
Tom St Denis566281592016-06-27 11:55:07 -04002522 goto end;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002523
2524 value = RREG32(*pos >> 2);
2525 r = put_user(value, (uint32_t *)buf);
Tom St Denis566281592016-06-27 11:55:07 -04002526 if (r) {
2527 result = r;
2528 goto end;
2529 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002530
2531 result += 4;
2532 buf += 4;
2533 *pos += 4;
2534 size -= 4;
2535 }
2536
Tom St Denis566281592016-06-27 11:55:07 -04002537end:
2538 if (use_bank) {
2539 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2540 mutex_unlock(&adev->grbm_idx_mutex);
2541 }
2542
Tom St Denisbd122672016-07-28 09:39:22 -04002543 if (pm_pg_lock)
2544 mutex_unlock(&adev->pm.mutex);
2545
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002546 return result;
2547}
2548
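/*
 * Illustrative sketch of the offset encoding consumed above, from userspace
 * (assumed debugfs path and register offset; not part of the driver). Bits
 * 0-17 select the register byte offset, bit 23 requests the PM/PG lock, and
 * bit 62 enables bank selection with SE/SH/instance in bits
 * 24-33/34-43/44-53.
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t reg = 0x2440;			/* example register byte offset */
	uint64_t se = 0, sh = 0, instance = 0;
	uint64_t offset = (reg & 0x3FFFF) |
			  (se << 24) | (sh << 34) | (instance << 44) |
			  (1ULL << 62);		/* request a banked read */
	uint32_t value;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &value, sizeof(value), offset) == (ssize_t)sizeof(value))
		printf("reg 0x%llx = 0x%08x\n", (unsigned long long)reg, value);
	close(fd);
	return 0;
}
#endif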
2549static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
2550 size_t size, loff_t *pos)
2551{
2552 struct amdgpu_device *adev = f->f_inode->i_private;
2553 ssize_t result = 0;
2554 int r;
2555
2556 if (size & 0x3 || *pos & 0x3)
2557 return -EINVAL;
2558
2559 while (size) {
2560 uint32_t value;
2561
2562 if (*pos > adev->rmmio_size)
2563 return result;
2564
2565 r = get_user(value, (uint32_t *)buf);
2566 if (r)
2567 return r;
2568
2569 WREG32(*pos >> 2, value);
2570
2571 result += 4;
2572 buf += 4;
2573 *pos += 4;
2574 size -= 4;
2575 }
2576
2577 return result;
2578}
2579
Tom St Denisadcec282016-04-15 13:08:44 -04002580static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
2581 size_t size, loff_t *pos)
2582{
2583 struct amdgpu_device *adev = f->f_inode->i_private;
2584 ssize_t result = 0;
2585 int r;
2586
2587 if (size & 0x3 || *pos & 0x3)
2588 return -EINVAL;
2589
2590 while (size) {
2591 uint32_t value;
2592
2593 value = RREG32_PCIE(*pos >> 2);
2594 r = put_user(value, (uint32_t *)buf);
2595 if (r)
2596 return r;
2597
2598 result += 4;
2599 buf += 4;
2600 *pos += 4;
2601 size -= 4;
2602 }
2603
2604 return result;
2605}
2606
2607static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
2608 size_t size, loff_t *pos)
2609{
2610 struct amdgpu_device *adev = f->f_inode->i_private;
2611 ssize_t result = 0;
2612 int r;
2613
2614 if (size & 0x3 || *pos & 0x3)
2615 return -EINVAL;
2616
2617 while (size) {
2618 uint32_t value;
2619
2620 r = get_user(value, (uint32_t *)buf);
2621 if (r)
2622 return r;
2623
2624 WREG32_PCIE(*pos >> 2, value);
2625
2626 result += 4;
2627 buf += 4;
2628 *pos += 4;
2629 size -= 4;
2630 }
2631
2632 return result;
2633}
2634
2635static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
2636 size_t size, loff_t *pos)
2637{
2638 struct amdgpu_device *adev = f->f_inode->i_private;
2639 ssize_t result = 0;
2640 int r;
2641
2642 if (size & 0x3 || *pos & 0x3)
2643 return -EINVAL;
2644
2645 while (size) {
2646 uint32_t value;
2647
2648 value = RREG32_DIDT(*pos >> 2);
2649 r = put_user(value, (uint32_t *)buf);
2650 if (r)
2651 return r;
2652
2653 result += 4;
2654 buf += 4;
2655 *pos += 4;
2656 size -= 4;
2657 }
2658
2659 return result;
2660}
2661
2662static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
2663 size_t size, loff_t *pos)
2664{
2665 struct amdgpu_device *adev = f->f_inode->i_private;
2666 ssize_t result = 0;
2667 int r;
2668
2669 if (size & 0x3 || *pos & 0x3)
2670 return -EINVAL;
2671
2672 while (size) {
2673 uint32_t value;
2674
2675 r = get_user(value, (uint32_t *)buf);
2676 if (r)
2677 return r;
2678
2679 WREG32_DIDT(*pos >> 2, value);
2680
2681 result += 4;
2682 buf += 4;
2683 *pos += 4;
2684 size -= 4;
2685 }
2686
2687 return result;
2688}
2689
2690static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
2691 size_t size, loff_t *pos)
2692{
2693 struct amdgpu_device *adev = f->f_inode->i_private;
2694 ssize_t result = 0;
2695 int r;
2696
2697 if (size & 0x3 || *pos & 0x3)
2698 return -EINVAL;
2699
2700 while (size) {
2701 uint32_t value;
2702
Tom St Denis6fc0dea2016-08-29 08:39:29 -04002703 value = RREG32_SMC(*pos);
Tom St Denisadcec282016-04-15 13:08:44 -04002704 r = put_user(value, (uint32_t *)buf);
2705 if (r)
2706 return r;
2707
2708 result += 4;
2709 buf += 4;
2710 *pos += 4;
2711 size -= 4;
2712 }
2713
2714 return result;
2715}
2716
2717static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
2718 size_t size, loff_t *pos)
2719{
2720 struct amdgpu_device *adev = f->f_inode->i_private;
2721 ssize_t result = 0;
2722 int r;
2723
2724 if (size & 0x3 || *pos & 0x3)
2725 return -EINVAL;
2726
2727 while (size) {
2728 uint32_t value;
2729
2730 r = get_user(value, (uint32_t *)buf);
2731 if (r)
2732 return r;
2733
Tom St Denis6fc0dea2016-08-29 08:39:29 -04002734 WREG32_SMC(*pos, value);
Tom St Denisadcec282016-04-15 13:08:44 -04002735
2736 result += 4;
2737 buf += 4;
2738 *pos += 4;
2739 size -= 4;
2740 }
2741
2742 return result;
2743}
2744
Tom St Denis1e051412016-06-27 09:57:18 -04002745static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
2746 size_t size, loff_t *pos)
2747{
2748 struct amdgpu_device *adev = f->f_inode->i_private;
2749 ssize_t result = 0;
2750 int r;
2751 uint32_t *config, no_regs = 0;
2752
2753 if (size & 0x3 || *pos & 0x3)
2754 return -EINVAL;
2755
Markus Elfringecab7662016-09-18 17:00:52 +02002756 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
Tom St Denis1e051412016-06-27 09:57:18 -04002757 if (!config)
2758 return -ENOMEM;
2759
2760 /* version, increment each time something is added */
Tom St Denise9f11dc2016-08-17 12:00:51 -04002761 config[no_regs++] = 2;
Tom St Denis1e051412016-06-27 09:57:18 -04002762 config[no_regs++] = adev->gfx.config.max_shader_engines;
2763 config[no_regs++] = adev->gfx.config.max_tile_pipes;
2764 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
2765 config[no_regs++] = adev->gfx.config.max_sh_per_se;
2766 config[no_regs++] = adev->gfx.config.max_backends_per_se;
2767 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
2768 config[no_regs++] = adev->gfx.config.max_gprs;
2769 config[no_regs++] = adev->gfx.config.max_gs_threads;
2770 config[no_regs++] = adev->gfx.config.max_hw_contexts;
2771 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
2772 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
2773 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
2774 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
2775 config[no_regs++] = adev->gfx.config.num_tile_pipes;
2776 config[no_regs++] = adev->gfx.config.backend_enable_mask;
2777 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
2778 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
2779 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
2780 config[no_regs++] = adev->gfx.config.num_gpus;
2781 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
2782 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
2783 config[no_regs++] = adev->gfx.config.gb_addr_config;
2784 config[no_regs++] = adev->gfx.config.num_rbs;
2785
Tom St Denis89a8f302016-08-12 15:14:31 -04002786 /* rev==1 */
2787 config[no_regs++] = adev->rev_id;
2788 config[no_regs++] = adev->pg_flags;
2789 config[no_regs++] = adev->cg_flags;
2790
Tom St Denise9f11dc2016-08-17 12:00:51 -04002791 /* rev==2 */
2792 config[no_regs++] = adev->family;
2793 config[no_regs++] = adev->external_rev_id;
2794
Tom St Denis1e051412016-06-27 09:57:18 -04002795 while (size && (*pos < no_regs * 4)) {
2796 uint32_t value;
2797
2798 value = config[*pos >> 2];
2799 r = put_user(value, (uint32_t *)buf);
2800 if (r) {
2801 kfree(config);
2802 return r;
2803 }
2804
2805 result += 4;
2806 buf += 4;
2807 *pos += 4;
2808 size -= 4;
2809 }
2810
2811 kfree(config);
2812 return result;
2813}
2814
Tom St Denisf2cdaf22016-09-15 10:08:44 -04002815static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
2816 size_t size, loff_t *pos)
2817{
2818 struct amdgpu_device *adev = f->f_inode->i_private;
2819 int idx, r;
2820 int32_t value;
2821
2822 if (size != 4 || *pos & 0x3)
2823 return -EINVAL;
2824
2825 /* convert offset to sensor number */
2826 idx = *pos >> 2;
2827
2828 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
2829 r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
2830 else
2831 return -EINVAL;
2832
2833 if (!r)
2834 r = put_user(value, (int32_t *)buf);
2835
2836 return !r ? 4 : r;
2837}
Tom St Denis1e051412016-06-27 09:57:18 -04002838
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002839static const struct file_operations amdgpu_debugfs_regs_fops = {
2840 .owner = THIS_MODULE,
2841 .read = amdgpu_debugfs_regs_read,
2842 .write = amdgpu_debugfs_regs_write,
2843 .llseek = default_llseek
2844};
Tom St Denisadcec282016-04-15 13:08:44 -04002845static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
2846 .owner = THIS_MODULE,
2847 .read = amdgpu_debugfs_regs_didt_read,
2848 .write = amdgpu_debugfs_regs_didt_write,
2849 .llseek = default_llseek
2850};
2851static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
2852 .owner = THIS_MODULE,
2853 .read = amdgpu_debugfs_regs_pcie_read,
2854 .write = amdgpu_debugfs_regs_pcie_write,
2855 .llseek = default_llseek
2856};
2857static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
2858 .owner = THIS_MODULE,
2859 .read = amdgpu_debugfs_regs_smc_read,
2860 .write = amdgpu_debugfs_regs_smc_write,
2861 .llseek = default_llseek
2862};
2863
Tom St Denis1e051412016-06-27 09:57:18 -04002864static const struct file_operations amdgpu_debugfs_gca_config_fops = {
2865 .owner = THIS_MODULE,
2866 .read = amdgpu_debugfs_gca_config_read,
2867 .llseek = default_llseek
2868};
2869
Tom St Denisf2cdaf22016-09-15 10:08:44 -04002870static const struct file_operations amdgpu_debugfs_sensors_fops = {
2871 .owner = THIS_MODULE,
2872 .read = amdgpu_debugfs_sensor_read,
2873 .llseek = default_llseek
2874};
2875
Tom St Denisadcec282016-04-15 13:08:44 -04002876static const struct file_operations *debugfs_regs[] = {
2877 &amdgpu_debugfs_regs_fops,
2878 &amdgpu_debugfs_regs_didt_fops,
2879 &amdgpu_debugfs_regs_pcie_fops,
2880 &amdgpu_debugfs_regs_smc_fops,
Tom St Denis1e051412016-06-27 09:57:18 -04002881 &amdgpu_debugfs_gca_config_fops,
Tom St Denisf2cdaf22016-09-15 10:08:44 -04002882 &amdgpu_debugfs_sensors_fops,
Tom St Denisadcec282016-04-15 13:08:44 -04002883};
2884
2885static const char *debugfs_regs_names[] = {
2886 "amdgpu_regs",
2887 "amdgpu_regs_didt",
2888 "amdgpu_regs_pcie",
2889 "amdgpu_regs_smc",
Tom St Denis1e051412016-06-27 09:57:18 -04002890 "amdgpu_gca_config",
Tom St Denisf2cdaf22016-09-15 10:08:44 -04002891 "amdgpu_sensors",
Tom St Denisadcec282016-04-15 13:08:44 -04002892};
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002893
2894static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
2895{
2896 struct drm_minor *minor = adev->ddev->primary;
2897 struct dentry *ent, *root = minor->debugfs_root;
Tom St Denisadcec282016-04-15 13:08:44 -04002898 unsigned i, j;
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002899
Tom St Denisadcec282016-04-15 13:08:44 -04002900 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
2901 ent = debugfs_create_file(debugfs_regs_names[i],
2902 S_IFREG | S_IRUGO, root,
2903 adev, debugfs_regs[i]);
2904 if (IS_ERR(ent)) {
2905 for (j = 0; j < i; j++) {
 2906 debugfs_remove(adev->debugfs_regs[j]);
 2907 adev->debugfs_regs[j] = NULL;
2908 }
2909 return PTR_ERR(ent);
2910 }
2911
2912 if (!i)
2913 i_size_write(ent->d_inode, adev->rmmio_size);
2914 adev->debugfs_regs[i] = ent;
2915 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002916
2917 return 0;
2918}
2919
2920static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
2921{
Tom St Denisadcec282016-04-15 13:08:44 -04002922 unsigned i;
2923
2924 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
2925 if (adev->debugfs_regs[i]) {
2926 debugfs_remove(adev->debugfs_regs[i]);
2927 adev->debugfs_regs[i] = NULL;
2928 }
2929 }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002930}
2931
2932int amdgpu_debugfs_init(struct drm_minor *minor)
2933{
2934 return 0;
2935}
2936
2937void amdgpu_debugfs_cleanup(struct drm_minor *minor)
2938{
2939}
Alexander Kuleshov7cebc722015-06-27 13:16:05 +06002940#else
2941static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
2942{
2943 return 0;
2944}
2945static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
Alex Deucherd38ceaf2015-04-20 16:55:21 -04002946#endif