/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_ucode.h"

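/*
 * CGS (Common Graphics Services) backs the amd powerplay code with a thin
 * device abstraction. amdgpu_cgs_device wraps the generic cgs_device handle
 * together with the owning amdgpu_device; the CGS_FUNC_ADEV macro below
 * recovers the amdgpu_device pointer from the cgs_device argument that every
 * callback receives.
 */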
struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV \
	struct amdgpu_device *adev = \
		((struct amdgpu_cgs_device *)cgs_device)->adev


static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

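/*
 * Indirect register access: dispatch on the cgs_ind_reg space and use the
 * matching amdgpu indirect accessor (MMIO index/data pair, PCIE, SMC,
 * UVD context, DIDT, GC_CAC, SE_CAC). Audio endpoint registers are not
 * implemented here.
 */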
static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG_GC_CAC:
		return RREG32_GC_CAC(index);
	case CGS_IND_REG_SE_CAC:
		return RREG32_SE_CAC(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG_GC_CAC:
		return WREG32_GC_CAC(index, value);
	case CGS_IND_REG_SE_CAC:
		return WREG32_SE_CAC(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

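/*
 * Report the bus address of a PCI resource to the caller. Only the MMIO
 * register BAR and the doorbell aperture are supported; the requested
 * offset/size window is bounds-checked against the resource before the
 * base address is returned.
 */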
static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}

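/* ATOM BIOS helpers: look up data tables, query command table revisions and
 * execute command tables through the atom_context parsed at init time.
 */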
static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

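/*
 * Clockgating/powergating requests are forwarded to the IP block of the
 * requested type by walking adev->ip_blocks and calling its
 * set_clockgating_state/set_powergating_state hook.
 */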
static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
				(void *)adev,
				state);
			break;
		}
	}
	return r;
}

static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
				(void *)adev,
				state);
			break;
		}
	}
	return r;
}


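/* Map a CGS firmware id onto the corresponding AMDGPU_UCODE_ID. */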
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		/* For VI, JT2 should be the same as JT1, because:
		 * 1. MEC2 and MEC1 use exactly the same FW.
		 * 2. JT2 is not patched but JT1 is.
		 */
		if (adev->asic_type >= CHIP_TOPAZ)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		else
			result = AMDGPU_UCODE_ID_CP_MEC2;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	case CGS_UCODE_ID_STORAGE:
		result = AMDGPU_UCODE_ID_STORAGE;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		return 0;
	}
	/* cannot release other firmware because it was not created by cgs */
	return -EINVAL;
}

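/* Return the loaded firmware version for the given CGS ucode id. */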
static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
					    enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	uint16_t fw_version = 0;

	switch (type) {
	case CGS_UCODE_ID_SDMA0:
		fw_version = adev->sdma.instance[0].fw_version;
		break;
	case CGS_UCODE_ID_SDMA1:
		fw_version = adev->sdma.instance[1].fw_version;
		break;
	case CGS_UCODE_ID_CP_CE:
		fw_version = adev->gfx.ce_fw_version;
		break;
	case CGS_UCODE_ID_CP_PFP:
		fw_version = adev->gfx.pfp_fw_version;
		break;
	case CGS_UCODE_ID_CP_ME:
		fw_version = adev->gfx.me_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC_JT1:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_RLC_G:
		fw_version = adev->gfx.rlc_fw_version;
		break;
	case CGS_UCODE_ID_STORAGE:
		break;
	default:
		DRM_ERROR("firmware type %d does not have a version\n", type);
		break;
	}
	return fw_version;
}

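/*
 * Toggle RLC safe mode via the gfx block's enter/exit callbacks; silently
 * succeeds when the callbacks are not implemented.
 */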
static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
				      bool en)
{
	CGS_FUNC_ADEV;

	if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
	    adev->gfx.rlc.funcs->exit_safe_mode == NULL)
		return 0;

	if (en)
		adev->gfx.rlc.funcs->enter_safe_mode(adev);
	else
		adev->gfx.rlc.funcs->exit_safe_mode(adev);

	return 0;
}

static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
				     bool lock)
{
	CGS_FUNC_ADEV;

	if (lock)
		mutex_lock(&adev->grbm_idx_mutex);
	else
		mutex_unlock(&adev->grbm_idx_mutex);
}

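/*
 * Fill a cgs_firmware_info from the firmware already loaded by amdgpu.
 * For GFX/SDMA/RLC ucode the data comes from adev->firmware.ucode[]; for
 * SMU firmware the ASIC-specific image is request_firmware()'d on first
 * use, validated, and (for PSP loading) registered in the ucode list.
 */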
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
			data_size = le32_to_cpu(header->jt_size) << 2;
		}

		info->kptr = ucode->kaddr;
		info->image_size = data_size;
		info->mc_addr = gpu_addr;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);

		if (CGS_UCODE_ID_CP_MEC == type)
			info->image_size = le32_to_cpu(header->jt_offset) << 2;

		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;
		const struct common_firmware_header *header;
		struct amdgpu_firmware_info *ucode = NULL;

		if (!adev->pm.fw) {
			switch (adev->asic_type) {
			case CHIP_TAHITI:
				strcpy(fw_name, "radeon/tahiti_smc.bin");
				break;
			case CHIP_PITCAIRN:
				if ((adev->pdev->revision == 0x81) &&
				    ((adev->pdev->device == 0x6810) ||
				     (adev->pdev->device == 0x6811))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/pitcairn_smc.bin");
				}
				break;
			case CHIP_VERDE:
				if (((adev->pdev->device == 0x6820) &&
				     ((adev->pdev->revision == 0x81) ||
				      (adev->pdev->revision == 0x83))) ||
				    ((adev->pdev->device == 0x6821) &&
				     ((adev->pdev->revision == 0x83) ||
				      (adev->pdev->revision == 0x87))) ||
				    ((adev->pdev->revision == 0x87) &&
				     ((adev->pdev->device == 0x6823) ||
				      (adev->pdev->device == 0x682b)))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/verde_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/verde_smc.bin");
				}
				break;
			case CHIP_OLAND:
				if (((adev->pdev->revision == 0x81) &&
				     ((adev->pdev->device == 0x6600) ||
				      (adev->pdev->device == 0x6604) ||
				      (adev->pdev->device == 0x6605) ||
				      (adev->pdev->device == 0x6610))) ||
				    ((adev->pdev->revision == 0x83) &&
				     (adev->pdev->device == 0x6610))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/oland_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/oland_smc.bin");
				}
				break;
			case CHIP_HAINAN:
				if (((adev->pdev->revision == 0x81) &&
				     (adev->pdev->device == 0x6660)) ||
				    ((adev->pdev->revision == 0x83) &&
				     ((adev->pdev->device == 0x6660) ||
				      (adev->pdev->device == 0x6663) ||
				      (adev->pdev->device == 0x6665) ||
				      (adev->pdev->device == 0x6667)))) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/hainan_k_smc.bin");
				} else if ((adev->pdev->revision == 0xc3) &&
					   (adev->pdev->device == 0x6665)) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/banks_k_2_smc.bin");
				} else {
					strcpy(fw_name, "radeon/hainan_smc.bin");
				}
				break;
			case CHIP_BONAIRE:
				if ((adev->pdev->revision == 0x80) ||
				    (adev->pdev->revision == 0x81) ||
				    (adev->pdev->device == 0x665f)) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/bonaire_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/bonaire_smc.bin");
				}
				break;
			case CHIP_HAWAII:
				if (adev->pdev->revision == 0x80) {
					info->is_kicker = true;
					strcpy(fw_name, "radeon/hawaii_k_smc.bin");
				} else {
					strcpy(fw_name, "radeon/hawaii_smc.bin");
				}
				break;
			case CHIP_TOPAZ:
				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/topaz_smc.bin");
				break;
			case CHIP_TONGA:
				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/tonga_smc.bin");
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU) {
					if (((adev->pdev->device == 0x67ef) &&
					     ((adev->pdev->revision == 0xe0) ||
					      (adev->pdev->revision == 0xe2) ||
					      (adev->pdev->revision == 0xe5))) ||
					    ((adev->pdev->device == 0x67ff) &&
					     ((adev->pdev->revision == 0xcf) ||
					      (adev->pdev->revision == 0xef) ||
					      (adev->pdev->revision == 0xff)))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU) {
					if ((adev->pdev->device == 0x67df) &&
					    ((adev->pdev->revision == 0xe0) ||
					     (adev->pdev->revision == 0xe3) ||
					     (adev->pdev->revision == 0xe4) ||
					     (adev->pdev->revision == 0xe5) ||
					     (adev->pdev->revision == 0xe7) ||
					     (adev->pdev->revision == 0xef))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS12:
				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
				break;
			case CHIP_VEGA10:
				if ((adev->pdev->device == 0x687f) &&
				    ((adev->pdev->revision == 0xc0) ||
				     (adev->pdev->revision == 0xc1) ||
				     (adev->pdev->revision == 0xc3)))
					strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
				else
					strcpy(fw_name, "amdgpu/vega10_smc.bin");
				break;
			case CHIP_VEGA12:
				strcpy(fw_name, "amdgpu/vega12_smc.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
			if (err) {
				DRM_ERROR("Failed to request firmware\n");
				return err;
			}

			err = amdgpu_ucode_validate(adev->pm.fw);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
				release_firmware(adev->pm.fw);
				adev->pm.fw = NULL;
				return err;
			}

			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
				ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
				ucode->fw = adev->pm.fw;
				header = (const struct common_firmware_header *)ucode->fw->data;
				adev->firmware.fw_size +=
					ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
			}
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}

static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
{
	CGS_FUNC_ADEV;
	return amdgpu_sriov_vf(adev);
}

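/*
 * Gather the active display mask, display count and vblank timing used by
 * the power code. Without DC support this is derived from the enabled
 * CRTCs; with DC it comes from the cached pm_display_cfg.
 */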
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;
	if (mode_info)
		/* if the displays are off, vblank time is max */
		mode_info->vblank_time_us = 0xffffffff;

	if (!amdgpu_device_has_dc_support(adev)) {
		struct amdgpu_crtc *amdgpu_crtc;
		struct drm_device *ddev = adev->ddev;
		struct drm_crtc *crtc;
		uint32_t line_time_us, vblank_lines;

		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
					info->display_count++;
				}
				if (mode_info != NULL &&
				    crtc->enabled && amdgpu_crtc->enabled &&
				    amdgpu_crtc->hw_mode.clock) {
					line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
						amdgpu_crtc->hw_mode.clock;
					vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
						amdgpu_crtc->hw_mode.crtc_vdisplay +
						(amdgpu_crtc->v_border * 2);
					mode_info->vblank_time_us = vblank_lines * line_time_us;
					mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
					/* we have issues with mclk switching with refresh rates
					 * over 120 hz on the non-DC code.
					 */
					if (mode_info->refresh_rate > 120)
						mode_info->vblank_time_us = 0;
					mode_info = NULL;
				}
			}
		}
	} else {
		info->display_count = adev->pm.pm_display_cfg.num_display;
		if (mode_info != NULL) {
			mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
			mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
		}
	}
	return 0;
}

static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}

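/* Callback table handed to CGS clients through the cgs_device they receive
 * from amdgpu_cgs_create_device().
 */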
static const struct cgs_ops amdgpu_cgs_ops = {
	.read_register = amdgpu_cgs_read_register,
	.write_register = amdgpu_cgs_write_register,
	.read_ind_register = amdgpu_cgs_read_ind_register,
	.write_ind_register = amdgpu_cgs_write_ind_register,
	.get_pci_resource = amdgpu_cgs_get_pci_resource,
	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
	.get_firmware_info = amdgpu_cgs_get_firmware_info,
	.rel_firmware = amdgpu_cgs_rel_firmware,
	.set_powergating_state = amdgpu_cgs_set_powergating_state,
	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
};

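/* Allocate/free the wrapper that binds a cgs_device to an amdgpu_device. */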
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->adev = adev;

	return (struct cgs_device *)cgs_device;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}