/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/scm.h>
#include <linux/pm_opp.h>
#include <linux/clk/qcom.h>

#include "adreno.h"
#include "a5xx_reg.h"
#include "adreno_a5xx.h"
#include "adreno_cp_parser.h"
#include "adreno_trace.h"
#include "adreno_pm4types.h"
#include "adreno_perfcounter.h"
#include "adreno_ringbuffer.h"
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl.h"
#include "kgsl_trace.h"
#include "adreno_a5xx_packets.h"

static int critical_packet_constructed;

static struct kgsl_memdesc crit_pkts;
static unsigned int crit_pkts_dwords;
static struct kgsl_memdesc crit_pkts_refbuf0;
static struct kgsl_memdesc crit_pkts_refbuf1;
static struct kgsl_memdesc crit_pkts_refbuf2;
static struct kgsl_memdesc crit_pkts_refbuf3;

static const struct adreno_vbif_data a530_vbif[] = {
	{A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003},
	{0, 0},
};

static const struct adreno_vbif_data a540_vbif[] = {
	{A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003},
	{A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009},
	{0, 0},
};

static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
	{ adreno_is_a540, a540_vbif },
	{ adreno_is_a530, a530_vbif },
	{ adreno_is_a512, a540_vbif },
	{ adreno_is_a510, a530_vbif },
	{ adreno_is_a508, a530_vbif },
	{ adreno_is_a504, a530_vbif },
	{ adreno_is_a505, a530_vbif },
	{ adreno_is_a506, a530_vbif },
};

static void a5xx_irq_storm_worker(struct work_struct *work);
static int _read_fw2_block_header(uint32_t *header, uint32_t remain,
	uint32_t id, uint32_t major, uint32_t minor);
static void a5xx_gpmu_reset(struct work_struct *work);
static int a5xx_gpmu_init(struct adreno_device *adreno_dev);

/**
 * Number of times to check if the regulator is enabled before
 * giving up and returning failure.
 */
#define PWR_RETRY 100

/**
 * Number of times to check if the GPMU firmware is initialized before
 * giving up and returning failure.
 */
#define GPMU_FW_INIT_RETRY 5000

#define A530_QFPROM_RAW_PTE_ROW0_MSB 0x134
#define A530_QFPROM_RAW_PTE_ROW2_MSB 0x144

static void a530_efuse_leakage(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int row0, row2;
	unsigned int multiplier, gfx_active, leakage_pwr_on, coeff;

	adreno_efuse_read_u32(adreno_dev,
		A530_QFPROM_RAW_PTE_ROW0_MSB, &row0);

	adreno_efuse_read_u32(adreno_dev,
		A530_QFPROM_RAW_PTE_ROW2_MSB, &row2);

	multiplier = (row0 >> 1) & 0x3;
	gfx_active = (row2 >> 2) & 0xFF;

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,base-leakage-coefficient", &coeff))
		return;

	leakage_pwr_on = gfx_active * (1 << multiplier);

	adreno_dev->lm_leakage = (leakage_pwr_on << 16) |
		((leakage_pwr_on * coeff) / 100);
}
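
/*
 * Worked example of the computation above (fuse values hypothetical):
 * with row0 bits[2:1] = 2 and row2 bits[9:2] = 100, multiplier = 2 and
 * gfx_active = 100, so leakage_pwr_on = 100 * (1 << 2) = 400. With a DT
 * coefficient of 25, lm_leakage packs 400 into the upper 16 bits and
 * (400 * 25) / 100 = 100 into the lower 16 bits: 0x01900064.
 */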

static void a530_efuse_speed_bin(struct adreno_device *adreno_dev)
{
	unsigned int val;
	unsigned int speed_bin[3];
	struct kgsl_device *device = &adreno_dev->dev;

	if (of_property_read_u32_array(device->pdev->dev.of_node,
		"qcom,gpu-speed-bin", speed_bin, 3))
		return;

	adreno_efuse_read_u32(adreno_dev, speed_bin[0], &val);

	adreno_dev->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
}
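
/*
 * Illustrative device-tree entry (hypothetical values, not from any
 * shipped platform): the "qcom,gpu-speed-bin" triple read above is
 * { fuse offset, bit mask, shift }, e.g.
 *
 *	qcom,gpu-speed-bin = <0x4008 0xe0000000 29>;
 *
 * which reads the fuse word at offset 0x4008, masks bits 31:29 and
 * shifts them down to produce the bin number.
 */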
124
Sunil Khatri93c50b82018-04-23 14:35:28 +0530125static void a5xx_efuse_speed_bin(struct adreno_device *adreno_dev)
126{
127 unsigned int val;
128 unsigned int speed_bin[3];
129 struct kgsl_device *device = &adreno_dev->dev;
130
131 if (of_get_property(device->pdev->dev.of_node,
132 "qcom,gpu-speed-bin-vectors", NULL)) {
133 adreno_efuse_speed_bin_array(adreno_dev);
134 return;
135 }
136
137 if (!of_property_read_u32_array(device->pdev->dev.of_node,
138 "qcom,gpu-speed-bin", speed_bin, 3)) {
139 adreno_efuse_read_u32(adreno_dev, speed_bin[0], &val);
140 adreno_dev->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
141 return;
142 }
143}
144
Shrenuj Bansala419c792016-10-20 14:05:11 -0700145static const struct {
146 int (*check)(struct adreno_device *adreno_dev);
147 void (*func)(struct adreno_device *adreno_dev);
148} a5xx_efuse_funcs[] = {
149 { adreno_is_a530, a530_efuse_leakage },
150 { adreno_is_a530, a530_efuse_speed_bin },
Sunil Khatri93c50b82018-04-23 14:35:28 +0530151 { adreno_is_a504, a5xx_efuse_speed_bin },
152 { adreno_is_a505, a5xx_efuse_speed_bin },
Deepak Kumare5924d72017-01-31 22:36:49 +0530153 { adreno_is_a512, a530_efuse_speed_bin },
Deepak Kumaradd273a2017-02-09 11:05:24 +0530154 { adreno_is_a508, a530_efuse_speed_bin },
Shrenuj Bansala419c792016-10-20 14:05:11 -0700155};

static void a5xx_check_features(struct adreno_device *adreno_dev)
{
	unsigned int i;

	if (adreno_efuse_map(adreno_dev))
		return;

	for (i = 0; i < ARRAY_SIZE(a5xx_efuse_funcs); i++) {
		if (a5xx_efuse_funcs[i].check(adreno_dev))
			a5xx_efuse_funcs[i].func(adreno_dev);
	}

	adreno_efuse_unmap(adreno_dev);
}

static void a5xx_platform_setup(struct adreno_device *adreno_dev)
{
	uint64_t addr;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_is_a504_to_a506(adreno_dev) || adreno_is_a508(adreno_dev)) {
		gpudev->snapshot_data->sect_sizes->cp_meq = 32;
		gpudev->snapshot_data->sect_sizes->cp_merciu = 1024;
		gpudev->snapshot_data->sect_sizes->roq = 256;

		/* A504-A506 and A508 have 3 XIN ports in VBIF */
		gpudev->vbif_xin_halt_ctrl0_mask =
			A510_VBIF_XIN_HALT_CTRL0_MASK;
	} else if (adreno_is_a510(adreno_dev)) {
		gpudev->snapshot_data->sect_sizes->cp_meq = 32;
		gpudev->snapshot_data->sect_sizes->cp_merciu = 32;
		gpudev->snapshot_data->sect_sizes->roq = 256;

		/* A510 has 3 XIN ports in VBIF */
		gpudev->vbif_xin_halt_ctrl0_mask =
			A510_VBIF_XIN_HALT_CTRL0_MASK;
	} else if (adreno_is_a540(adreno_dev) ||
		adreno_is_a512(adreno_dev)) {
		gpudev->snapshot_data->sect_sizes->cp_merciu = 1024;
	}

	/* Calculate SP local and private mem addresses */
	addr = ALIGN(ADRENO_UCHE_GMEM_BASE + adreno_dev->gmem_size, SZ_64K);
	adreno_dev->sp_local_gpuaddr = addr;
	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;

	/* Set up defaults that might get changed by the fuse bits */
	adreno_dev->lm_leakage = A530_DEFAULT_LEAKAGE;
	adreno_dev->speed_bin = 0;

	/* Check efuse bits for various capabilities */
	a5xx_check_features(adreno_dev);
}

static void a5xx_critical_packet_destroy(struct adreno_device *adreno_dev)
{
	kgsl_free_global(&adreno_dev->dev, &crit_pkts);
	kgsl_free_global(&adreno_dev->dev, &crit_pkts_refbuf1);
	kgsl_free_global(&adreno_dev->dev, &crit_pkts_refbuf2);
	kgsl_free_global(&adreno_dev->dev, &crit_pkts_refbuf3);

	kgsl_iommu_unmap_global_secure_pt_entry(KGSL_DEVICE(adreno_dev),
			&crit_pkts_refbuf0);
	kgsl_sharedmem_free(&crit_pkts_refbuf0);
}

static void _do_fixup(const struct adreno_critical_fixup *fixups, int count,
		uint64_t *gpuaddrs, unsigned int *buffer)
{
	int i;

	for (i = 0; i < count; i++) {
		buffer[fixups[i].lo_offset] =
			lower_32_bits(gpuaddrs[fixups[i].buffer]) |
			fixups[i].mem_offset;

		buffer[fixups[i].hi_offset] =
			upper_32_bits(gpuaddrs[fixups[i].buffer]);
	}
}
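
/*
 * Worked example of a fixup (hypothetical entry, not taken from
 * critical_pkt_fixups): given { .buffer = CRITICAL_PACKET1,
 * .lo_offset = 10, .hi_offset = 11, .mem_offset = 0x100 } and
 * crit_pkts_refbuf1 mapped at GPU VA 0x120000000, _do_fixup() patches
 * dword 10 of the packet buffer to 0x20000100 (low 32 address bits ORed
 * with the offset) and dword 11 to 0x00000001 (high 32 bits), filling
 * in the 64-bit address operand of the prebuilt packet.
 */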

static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev)
{
	unsigned int *cmds;
	uint64_t gpuaddrs[CRITICAL_PACKET_MAX];
	int ret;

	ret = kgsl_allocate_global(&adreno_dev->dev,
		&crit_pkts, PAGE_SIZE,
		KGSL_MEMFLAGS_GPUREADONLY,
		0, "crit_pkts");
	if (ret)
		return ret;

	ret = kgsl_allocate_user(&adreno_dev->dev, &crit_pkts_refbuf0,
		PAGE_SIZE, KGSL_MEMFLAGS_SECURE);
	if (ret)
		return ret;

	ret = kgsl_iommu_map_global_secure_pt_entry(&adreno_dev->dev,
		&crit_pkts_refbuf0);
	if (ret)
		return ret;

	ret = kgsl_allocate_global(&adreno_dev->dev,
		&crit_pkts_refbuf1,
		PAGE_SIZE, 0, 0, "crit_pkts_refbuf1");
	if (ret)
		return ret;

	ret = kgsl_allocate_global(&adreno_dev->dev,
		&crit_pkts_refbuf2,
		PAGE_SIZE, 0, 0, "crit_pkts_refbuf2");
	if (ret)
		return ret;

	ret = kgsl_allocate_global(&adreno_dev->dev,
		&crit_pkts_refbuf3,
		PAGE_SIZE, 0, 0, "crit_pkts_refbuf3");
	if (ret)
		return ret;

	cmds = crit_pkts.hostptr;

	gpuaddrs[CRITICAL_PACKET0] = crit_pkts_refbuf0.gpuaddr;
	gpuaddrs[CRITICAL_PACKET1] = crit_pkts_refbuf1.gpuaddr;
	gpuaddrs[CRITICAL_PACKET2] = crit_pkts_refbuf2.gpuaddr;
	gpuaddrs[CRITICAL_PACKET3] = crit_pkts_refbuf3.gpuaddr;

	crit_pkts_dwords = ARRAY_SIZE(_a5xx_critical_pkts);

	memcpy(cmds, _a5xx_critical_pkts, crit_pkts_dwords << 2);

	_do_fixup(critical_pkt_fixups, ARRAY_SIZE(critical_pkt_fixups),
		gpuaddrs, cmds);

	cmds = crit_pkts_refbuf1.hostptr;
	memcpy(cmds, _a5xx_critical_pkts_mem01,
		ARRAY_SIZE(_a5xx_critical_pkts_mem01) << 2);

	cmds = crit_pkts_refbuf2.hostptr;
	memcpy(cmds, _a5xx_critical_pkts_mem02,
		ARRAY_SIZE(_a5xx_critical_pkts_mem02) << 2);

	cmds = crit_pkts_refbuf3.hostptr;
	memcpy(cmds, _a5xx_critical_pkts_mem03,
		ARRAY_SIZE(_a5xx_critical_pkts_mem03) << 2);

	_do_fixup(critical_pkt_mem03_fixups,
		ARRAY_SIZE(critical_pkt_mem03_fixups), gpuaddrs, cmds);

	critical_packet_constructed = 1;

	return 0;
}

static void a5xx_init(struct adreno_device *adreno_dev)
{
	if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
		INIT_WORK(&adreno_dev->gpmu_work, a5xx_gpmu_reset);

	INIT_WORK(&adreno_dev->irq_storm_work, a5xx_irq_storm_worker);

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS)) {
		int ret;

		ret = a5xx_critical_packet_construct(adreno_dev);
		if (ret)
			a5xx_critical_packet_destroy(adreno_dev);
	}

	a5xx_crashdump_init(adreno_dev);
}

static void a5xx_remove(struct adreno_device *adreno_dev)
{
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS))
		a5xx_critical_packet_destroy(adreno_dev);
}

/**
 * a5xx_protect_init() - Initializes register protection on a5xx
 * @adreno_dev: Pointer to the adreno device
 *
 * Performs register writes to enable protected access to sensitive
 * registers
 */
static void a5xx_protect_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int index = 0;
	struct kgsl_protected_registers *iommu_regs;

	/* enable access protection to privileged registers */
	kgsl_regwrite(device, A5XX_CP_PROTECT_CNTL, 0x00000007);

	/* RBBM registers */
	adreno_set_protected_registers(adreno_dev, &index, 0x4, 2);
	adreno_set_protected_registers(adreno_dev, &index, 0x8, 3);
	adreno_set_protected_registers(adreno_dev, &index, 0x10, 4);
	adreno_set_protected_registers(adreno_dev, &index, 0x20, 5);
	adreno_set_protected_registers(adreno_dev, &index, 0x40, 6);
	adreno_set_protected_registers(adreno_dev, &index, 0x80, 6);

	/* Content protection registers */
	adreno_set_protected_registers(adreno_dev, &index,
		A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 4);
	adreno_set_protected_registers(adreno_dev, &index,
		A5XX_RBBM_SECVID_TRUST_CNTL, 1);

	/* CP registers */
	adreno_set_protected_registers(adreno_dev, &index, 0x800, 6);
	adreno_set_protected_registers(adreno_dev, &index, 0x840, 3);
	adreno_set_protected_registers(adreno_dev, &index, 0x880, 5);
	adreno_set_protected_registers(adreno_dev, &index, 0x0AA0, 0);

	/* RB registers */
	adreno_set_protected_registers(adreno_dev, &index, 0xCC0, 0);
	adreno_set_protected_registers(adreno_dev, &index, 0xCF0, 1);

	/* VPC registers */
	adreno_set_protected_registers(adreno_dev, &index, 0xE68, 3);
	adreno_set_protected_registers(adreno_dev, &index, 0xE70, 4);

	/* UCHE registers */
	adreno_set_protected_registers(adreno_dev, &index, 0xE80, ilog2(16));

	/* SMMU registers */
	iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
	if (iommu_regs)
		adreno_set_protected_registers(adreno_dev, &index,
			iommu_regs->base, ilog2(iommu_regs->range));
}

/*
 * a5xx_is_sptp_idle() - A530 SP/TP/RAC should be power collapsed to be
 * considered idle
 * @adreno_dev: The adreno_device pointer
 */
static bool a5xx_is_sptp_idle(struct adreno_device *adreno_dev)
{
	unsigned int reg;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/* If feature is not supported or enabled, no worry */
	if (!ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC) ||
		!test_bit(ADRENO_SPTP_PC_CTRL, &adreno_dev->pwrctrl_flag))
		return true;
	kgsl_regread(device, A5XX_GPMU_SP_PWR_CLK_STATUS, &reg);
	if (reg & BIT(20))
		return false;
	kgsl_regread(device, A5XX_GPMU_RBCCU_PWR_CLK_STATUS, &reg);
	return !(reg & BIT(20));
}

/*
 * _poll_gdsc_status() - Poll the GDSC status register
 * @adreno_dev: The adreno device pointer
 * @status_reg: Offset of the status register
 * @status_value: The expected bit value
 *
 * Poll the status register till the power-on bit is equal to the
 * expected value or the max retries are exceeded.
 */
static int _poll_gdsc_status(struct adreno_device *adreno_dev,
		unsigned int status_reg,
		unsigned int status_value)
{
	unsigned int reg, retry = PWR_RETRY;

	/* Bit 20 is the power on bit of SPTP and RAC GDSC status register */
	do {
		udelay(1);
		kgsl_regread(KGSL_DEVICE(adreno_dev), status_reg, &reg);
	} while (((reg & BIT(20)) != (status_value << 20)) && retry--);
	if ((reg & BIT(20)) != (status_value << 20))
		return -ETIMEDOUT;
	return 0;
}

static void a5xx_restore_isense_regs(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int reg, i, ramp = GPMU_ISENSE_SAVE;
	static unsigned int isense_regs[6] = {0xFFFF}, isense_reg_addr[] = {
		A5XX_GPU_CS_DECIMAL_ALIGN,
		A5XX_GPU_CS_SENSOR_PARAM_CORE_1,
		A5XX_GPU_CS_SENSOR_PARAM_CORE_2,
		A5XX_GPU_CS_SW_OV_FUSE_EN,
		A5XX_GPU_CS_ENDPOINT_CALIBRATION_DONE,
		A5XX_GPMU_TEMP_SENSOR_CONFIG};

	if (!adreno_is_a540(adreno_dev))
		return;

	/* read signature */
	kgsl_regread(device, ramp++, &reg);

	if (reg == 0xBABEFACE) {
		/* store memory locations in buffer */
		for (i = 0; i < ARRAY_SIZE(isense_regs); i++)
			kgsl_regread(device, ramp + i, isense_regs + i);

		/* clear signature */
		kgsl_regwrite(device, GPMU_ISENSE_SAVE, 0x0);
	}

	/* if we never stored memory locations - do nothing */
	if (isense_regs[0] == 0xFFFF)
		return;

	/* restore registers from memory */
	for (i = 0; i < ARRAY_SIZE(isense_reg_addr); i++)
		kgsl_regwrite(device, isense_reg_addr[i], isense_regs[i]);
}

/*
 * a5xx_regulator_enable() - Enable any necessary HW regulators
 * @adreno_dev: The adreno device pointer
 *
 * Some HW blocks may need their regulators explicitly enabled
 * on a restart. Clocks must be on during this call.
 */
static int a5xx_regulator_enable(struct adreno_device *adreno_dev)
{
	unsigned int ret;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!(adreno_is_a530(adreno_dev) || adreno_is_a540(adreno_dev))) {
		/* Halt the sp_input_clk at HM level */
		kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL, 0x00000055);
		a5xx_hwcg_set(adreno_dev, true);
		/* Turn on sp_input_clk at HM level */
		kgsl_regrmw(device, A5XX_RBBM_CLOCK_CNTL, 0xFF, 0);
		return 0;
	}

	/*
	 * Turn on smaller power domain first to reduce voltage droop.
	 * Set the default register values; set SW_COLLAPSE to 0.
	 */
	kgsl_regwrite(device, A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
	/* Insert a delay between RAC and SPTP GDSC to reduce voltage droop */
	udelay(3);
	ret = _poll_gdsc_status(adreno_dev, A5XX_GPMU_RBCCU_PWR_CLK_STATUS, 1);
	if (ret) {
		KGSL_PWR_ERR(device, "RBCCU GDSC enable failed\n");
		return ret;
	}

	kgsl_regwrite(device, A5XX_GPMU_SP_POWER_CNTL, 0x778000);
	ret = _poll_gdsc_status(adreno_dev, A5XX_GPMU_SP_PWR_CLK_STATUS, 1);
	if (ret) {
		KGSL_PWR_ERR(device, "SPTP GDSC enable failed\n");
		return ret;
	}

	/* Disable SP clock */
	kgsl_regrmw(device, A5XX_GPMU_GPMU_SP_CLOCK_CONTROL,
		CNTL_IP_CLK_ENABLE, 0);
	/* Enable hardware clock gating */
	a5xx_hwcg_set(adreno_dev, true);
	/* Enable SP clock */
	kgsl_regrmw(device, A5XX_GPMU_GPMU_SP_CLOCK_CONTROL,
		CNTL_IP_CLK_ENABLE, 1);

	a5xx_restore_isense_regs(adreno_dev);
	return 0;
}

/*
 * a5xx_regulator_disable() - Disable any necessary HW regulators
 * @adreno_dev: The adreno device pointer
 *
 * Some HW blocks may need their regulators explicitly disabled
 * on a power down to prevent current spikes. Clocks must be on
 * during this call.
 */
static void a5xx_regulator_disable(struct adreno_device *adreno_dev)
{
	unsigned int reg;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (adreno_is_a512(adreno_dev) || adreno_is_a508(adreno_dev))
		return;

	/* If feature is not supported or not enabled */
	if (!ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC) ||
		!test_bit(ADRENO_SPTP_PC_CTRL, &adreno_dev->pwrctrl_flag)) {
		/* Set the default register values; set SW_COLLAPSE to 1 */
		kgsl_regwrite(device, A5XX_GPMU_SP_POWER_CNTL, 0x778001);
		/*
		 * Insert a delay between SPTP and RAC GDSC to reduce voltage
		 * droop.
		 */
		udelay(3);
		if (_poll_gdsc_status(adreno_dev,
			A5XX_GPMU_SP_PWR_CLK_STATUS, 0))
			KGSL_PWR_WARN(device, "SPTP GDSC disable failed\n");

		kgsl_regwrite(device, A5XX_GPMU_RBCCU_POWER_CNTL, 0x778001);
		if (_poll_gdsc_status(adreno_dev,
			A5XX_GPMU_RBCCU_PWR_CLK_STATUS, 0))
			KGSL_PWR_WARN(device, "RBCCU GDSC disable failed\n");
	} else if (test_bit(ADRENO_DEVICE_GPMU_INITIALIZED,
			&adreno_dev->priv)) {
		/* GPMU firmware is supposed to turn off SPTP & RAC GDSCs. */
		kgsl_regread(device, A5XX_GPMU_SP_PWR_CLK_STATUS, &reg);
		if (reg & BIT(20))
			KGSL_PWR_WARN(device, "SPTP GDSC is not disabled\n");
		kgsl_regread(device, A5XX_GPMU_RBCCU_PWR_CLK_STATUS, &reg);
		if (reg & BIT(20))
			KGSL_PWR_WARN(device, "RBCCU GDSC is not disabled\n");
		/*
		 * GPMU firmware is supposed to set GMEM to non-retention.
		 * Bit 14 is the memory core force on bit.
		 */
		kgsl_regread(device, A5XX_GPMU_RBCCU_CLOCK_CNTL, &reg);
		if (reg & BIT(14))
			KGSL_PWR_WARN(device, "GMEM is forced on\n");
	}

	if (adreno_is_a530(adreno_dev)) {
		/* Reset VBIF before PC to avoid popping bogus FIFO entries */
		kgsl_regwrite(device, A5XX_RBBM_BLOCK_SW_RESET_CMD,
			0x003C0000);
		kgsl_regwrite(device, A5XX_RBBM_BLOCK_SW_RESET_CMD, 0);
	}
}

/*
 * a5xx_enable_pc() - Enable the GPMU based power collapse of the SPTP and RAC
 * blocks
 * @adreno_dev: The adreno device pointer
 */
static void a5xx_enable_pc(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC) ||
		!test_bit(ADRENO_SPTP_PC_CTRL, &adreno_dev->pwrctrl_flag))
		return;

	kgsl_regwrite(device, A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x0000007F);
	kgsl_regwrite(device, A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
	kgsl_regwrite(device, A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0x000A0080);
	kgsl_regwrite(device, A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x00600040);

	trace_adreno_sp_tp((unsigned long) __builtin_return_address(0));
}

/*
 * The maximum payload of a type4 packet is the max size minus one for the
 * opcode
 */
#define TYPE4_MAX_PAYLOAD (PM4_TYPE4_PKT_SIZE_MAX - 1)

static int _gpmu_create_load_cmds(struct adreno_device *adreno_dev,
	uint32_t *ucode, uint32_t size)
{
	uint32_t *start, *cmds;
	uint32_t offset = 0;
	uint32_t cmds_size = size;

	/* Add a dword for each PM4 packet */
	cmds_size += (size / TYPE4_MAX_PAYLOAD) + 1;

	/* Add 4 dwords for the protected mode */
	cmds_size += 4;

	if (adreno_dev->gpmu_cmds != NULL)
		return 0;

	adreno_dev->gpmu_cmds = kmalloc(cmds_size << 2, GFP_KERNEL);
	if (adreno_dev->gpmu_cmds == NULL)
		return -ENOMEM;

	cmds = adreno_dev->gpmu_cmds;
	start = cmds;

	/* Turn CP protection OFF */
	*cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
	*cmds++ = 0;

	/*
	 * Prebuild the cmd stream to send to the GPU to load
	 * the GPMU firmware
	 */
	while (size > 0) {
		int tmp_size = size;

		if (size >= TYPE4_MAX_PAYLOAD)
			tmp_size = TYPE4_MAX_PAYLOAD;

		*cmds++ = cp_type4_packet(
			A5XX_GPMU_INST_RAM_BASE + offset,
			tmp_size);

		memcpy(cmds, &ucode[offset], tmp_size << 2);

		cmds += tmp_size;
		offset += tmp_size;
		size -= tmp_size;
	}

	/* Turn CP protection ON */
	*cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
	*cmds++ = 1;

	adreno_dev->gpmu_cmds_size = (size_t) (cmds - start);

	return 0;
}
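
/*
 * Sketch of the stream built by _gpmu_create_load_cmds() (sizes are
 * hypothetical, since PM4_TYPE4_PKT_SIZE_MAX is defined elsewhere): for
 * a 300-dword ucode image and a 127-dword TYPE4_MAX_PAYLOAD, the buffer
 * would hold:
 *
 *	TYPE7 CP_SET_PROTECTED_MODE, 0		protection off
 *	TYPE4 header @ INST_RAM + 0,	127 dwords of ucode
 *	TYPE4 header @ INST_RAM + 127,	127 dwords of ucode
 *	TYPE4 header @ INST_RAM + 254,	 46 dwords of ucode
 *	TYPE7 CP_SET_PROTECTED_MODE, 1		protection back on
 */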

/*
 * _load_gpmu_firmware() - Load the ucode into the GPMU RAM
 * @adreno_dev: Pointer to adreno device
 */
static int _load_gpmu_firmware(struct adreno_device *adreno_dev)
{
	uint32_t *data;
	const struct firmware *fw = NULL;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
	uint32_t *cmds, cmd_size;
	int ret = -EINVAL;

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
		return 0;

	/* gpmu fw already saved and verified so do nothing new */
	if (adreno_dev->gpmu_cmds_size != 0)
		return 0;

	if (gpucore->gpmufw_name == NULL)
		return 0;

	ret = request_firmware(&fw, gpucore->gpmufw_name, device->dev);
	if (ret || fw == NULL) {
		KGSL_CORE_ERR("request_firmware (%s) failed: %d\n",
			gpucore->gpmufw_name, ret);
		return ret;
	}

	data = (uint32_t *)fw->data;

	if (data[0] >= (fw->size / sizeof(uint32_t)) || data[0] < 2)
		goto err;

	if (data[1] != GPMU_FIRMWARE_ID)
		goto err;
	ret = _read_fw2_block_header(&data[2],
		data[0] - 2,
		GPMU_FIRMWARE_ID,
		adreno_dev->gpucore->gpmu_major,
		adreno_dev->gpucore->gpmu_minor);
	if (ret)
		goto err;

	/* Integer overflow check for cmd_size */
	if (data[2] > (data[0] - 2))
		goto err;

	cmds = data + data[2] + 3;
	cmd_size = data[0] - data[2] - 2;

	if (cmd_size > GPMU_INST_RAM_SIZE) {
		KGSL_CORE_ERR(
			"GPMU firmware block size is larger than RAM size\n");
		goto err;
	}

	/* Everything is cool, so create some commands */
	ret = _gpmu_create_load_cmds(adreno_dev, cmds, cmd_size);
err:
	if (fw)
		release_firmware(fw);

	return ret;
}

static int _gpmu_send_init_cmds(struct adreno_device *adreno_dev)
{
	struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
	uint32_t *cmds;
	uint32_t size = adreno_dev->gpmu_cmds_size;
	int ret;

	if (size == 0 || adreno_dev->gpmu_cmds == NULL)
		return -EINVAL;

	cmds = adreno_ringbuffer_allocspace(rb, size);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);
	if (cmds == NULL)
		return -ENOSPC;

	/* Copy to the RB the predefined fw sequence cmds */
	memcpy(cmds, adreno_dev->gpmu_cmds, size << 2);

	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
	if (ret != 0)
		adreno_spin_idle_debug(adreno_dev,
			"gpmu initialization failed to idle\n");

	return ret;
}

/*
 * a5xx_gpmu_start() - Initialize and start the GPMU
 * @adreno_dev: Pointer to adreno device
 *
 * Load the GPMU microcode, set up any features such as hardware clock gating
 * or IFPC, and take the GPMU out of reset.
 */
static int a5xx_gpmu_start(struct adreno_device *adreno_dev)
{
	int ret;
	unsigned int reg, retry = GPMU_FW_INIT_RETRY;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
		return 0;

	ret = _gpmu_send_init_cmds(adreno_dev);
	if (ret)
		return ret;

	if (adreno_is_a530(adreno_dev)) {
		/* GPMU clock gating setup */
		kgsl_regwrite(device, A5XX_GPMU_WFI_CONFIG, 0x00004014);
	}
	/* Kick off GPMU firmware */
	kgsl_regwrite(device, A5XX_GPMU_CM3_SYSRESET, 0);
	/*
	 * The hardware team's estimation of GPMU firmware initialization
	 * latency is about 3000 cycles, that's about 5 to 24 usec.
	 */
	do {
		udelay(1);
		kgsl_regread(device, A5XX_GPMU_GENERAL_0, &reg);
	} while ((reg != 0xBABEFACE) && retry--);

	if (reg != 0xBABEFACE) {
		KGSL_CORE_ERR("GPMU firmware initialization timed out\n");
		return -ETIMEDOUT;
	}

	if (!adreno_is_a530(adreno_dev)) {
		kgsl_regread(device, A5XX_GPMU_GENERAL_1, &reg);

		if (reg) {
			KGSL_CORE_ERR(
				"GPMU firmware initialization failed: %d\n",
				reg);
			return -EIO;
		}
	}
	set_bit(ADRENO_DEVICE_GPMU_INITIALIZED, &adreno_dev->priv);
	/*
	 * We are in AWARE state and IRQ line from GPU to host is
	 * disabled.
	 * Read pending GPMU interrupts and clear GPMU_RBBM_INTR_INFO.
	 */
	kgsl_regread(device, A5XX_GPMU_RBBM_INTR_INFO, &reg);
	/*
	 * Clear RBBM interrupt mask if any of GPMU interrupts
	 * are pending.
	 */
	if (reg)
		kgsl_regwrite(device,
			A5XX_RBBM_INT_CLEAR_CMD,
			1 << A5XX_INT_GPMU_FIRMWARE);
	return ret;
}

struct kgsl_hwcg_reg {
	unsigned int off;
	unsigned int val;
};

static const struct kgsl_hwcg_reg a50x_hwcg_regs[] = {
	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};

static const struct kgsl_hwcg_reg a510_hwcg_regs[] = {
	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};

static const struct kgsl_hwcg_reg a530_hwcg_regs[] = {
	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
	{A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
	{A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
	{A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
	{A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
	{A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
	{A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
	{A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
	{A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};

static const struct kgsl_hwcg_reg a540_hwcg_regs[] = {
	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
	{A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
	{A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
	{A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
	{A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
	{A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
	{A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
	{A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
	{A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{A5XX_RBBM_CLOCK_HYST_GPMU, 0x00000222},
	{A5XX_RBBM_CLOCK_DELAY_GPMU, 0x00000770},
	{A5XX_RBBM_CLOCK_HYST_GPMU, 0x00000004}
};

static const struct kgsl_hwcg_reg a512_hwcg_regs[] = {
	{A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
	{A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
	{A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
	{A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
	{A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
	{A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
	{A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
	{A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
	{A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
	{A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
	{A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
	{A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
	{A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
	{A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
	{A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
	{A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
};

static const struct {
	int (*devfunc)(struct adreno_device *adreno_dev);
	const struct kgsl_hwcg_reg *regs;
	unsigned int count;
} a5xx_hwcg_registers[] = {
	{ adreno_is_a540, a540_hwcg_regs, ARRAY_SIZE(a540_hwcg_regs) },
	{ adreno_is_a530, a530_hwcg_regs, ARRAY_SIZE(a530_hwcg_regs) },
	{ adreno_is_a512, a512_hwcg_regs, ARRAY_SIZE(a512_hwcg_regs) },
	{ adreno_is_a510, a510_hwcg_regs, ARRAY_SIZE(a510_hwcg_regs) },
	{ adreno_is_a504, a50x_hwcg_regs, ARRAY_SIZE(a50x_hwcg_regs) },
	{ adreno_is_a505, a50x_hwcg_regs, ARRAY_SIZE(a50x_hwcg_regs) },
	{ adreno_is_a506, a50x_hwcg_regs, ARRAY_SIZE(a50x_hwcg_regs) },
	{ adreno_is_a508, a50x_hwcg_regs, ARRAY_SIZE(a50x_hwcg_regs) },
};

void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct kgsl_hwcg_reg *regs;
	int i, j;

	if (!test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag))
		return;

	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_registers); i++) {
		if (a5xx_hwcg_registers[i].devfunc(adreno_dev))
			break;
	}

	if (i == ARRAY_SIZE(a5xx_hwcg_registers))
		return;

	regs = a5xx_hwcg_registers[i].regs;

	for (j = 0; j < a5xx_hwcg_registers[i].count; j++)
		kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);

	/* enable top level HWCG */
	kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL, on ? 0xAAA8AA00 : 0);
	kgsl_regwrite(device, A5XX_RBBM_ISDB_CNT, on ? 0x00000182 : 0x00000180);
}

static int _read_fw2_block_header(uint32_t *header, uint32_t remain,
		uint32_t id, uint32_t major, uint32_t minor)
{
	uint32_t header_size;
	int i = 1;

	if (header == NULL)
		return -ENOMEM;

	header_size = header[0];
	/* Headers have limited size and always occur as pairs of words */
	if (header_size > MAX_HEADER_SIZE || header_size >= remain ||
		header_size % 2 || header_size == 0)
		return -EINVAL;
	/* Sequences must have an identifying id first thing in their header */
	if (id == GPMU_SEQUENCE_ID) {
		if (header[i] != HEADER_SEQUENCE ||
			(header[i + 1] >= MAX_SEQUENCE_ID))
			return -EINVAL;
		i += 2;
	}
	for (; i < header_size; i += 2) {
		switch (header[i]) {
		/* Major Version */
		case HEADER_MAJOR:
			if ((major > header[i + 1]) &&
				header[i + 1]) {
				KGSL_CORE_ERR(
					"GPMU major version mismatch %d, %d\n",
					major, header[i + 1]);
				return -EINVAL;
			}
			break;
		case HEADER_MINOR:
			if (minor > header[i + 1])
				KGSL_CORE_ERR(
					"GPMU minor version mismatch %d %d\n",
					minor, header[i + 1]);
			break;
		case HEADER_DATE:
		case HEADER_TIME:
			break;
		default:
			KGSL_CORE_ERR("GPMU unknown header ID %d\n",
					header[i]);
		}
	}
	return 0;
}

/*
 * Read in the register sequence file and save pointers to the
 * necessary sequences.
 *
 * GPU sequence file format (one dword per field unless noted):
 * Block 1 length (length dword field not inclusive)
 * Block 1 type = Sequence = 3
 * Block Header length (length dword field not inclusive)
 * BH field ID = Sequence field ID
 * BH field data = Sequence ID
 * BH field ID
 * BH field data
 * ...
 * Opcode 0 ID
 * Opcode 0 data M words
 * Opcode 1 ID
 * Opcode 1 data N words
 * ...
 * Opcode X ID
 * Opcode X data O words
 * Block 2 length...
 */
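
/*
 * Illustrative single-block layout (symbolic IDs; the dword values are
 * hypothetical, not taken from a shipped register file):
 *
 *	block[0] = 8			block length (this dword excluded)
 *	block[1] = GPMU_SEQUENCE_ID	block type = sequence
 *	block[2] = 4			block header length
 *	block[3] = HEADER_SEQUENCE	header field ID
 *	block[4] = LM_SEQUENCE_ID	header field data: sequence ID
 *	block[5] = HEADER_MAJOR		header field ID
 *	block[6] = 1			header field data: major version
 *	block[7] = 3			opcode: delay
 *	block[8] = 10			opcode data: 10 usec
 *
 * For this block _load_regfile() below would set lm_sequence to
 * block + block[2] + 3 (= &block[7]) and lm_size to
 * block_size - block[2] - 2 (= 2 dwords).
 */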
static void _load_regfile(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct firmware *fw;
	uint64_t block_size = 0, block_total = 0;
	uint32_t fw_size, *block;
	int ret = -EINVAL;

	if (!adreno_dev->gpucore->regfw_name)
		return;

	ret = request_firmware(&fw, adreno_dev->gpucore->regfw_name,
		device->dev);
	if (ret) {
		KGSL_PWR_ERR(device, "request firmware failed %d, %s\n",
			ret, adreno_dev->gpucore->regfw_name);
		return;
	}

	fw_size = fw->size / sizeof(uint32_t);
	/* Min valid file of size 6, see file description */
	if (fw_size < 6)
		goto err;
	block = (uint32_t *)fw->data;
	/* All offset numbers calculated from file description */
	while (block_total < fw_size) {
		block_size = block[0];
		if (((block_total + block_size) >= fw_size)
				|| block_size < 5)
			goto err;
		if (block[1] != GPMU_SEQUENCE_ID)
			goto err;

		/* For now ignore blocks other than the LM sequence */
		if (block[4] == LM_SEQUENCE_ID) {
			ret = _read_fw2_block_header(&block[2],
				block_size - 2,
				GPMU_SEQUENCE_ID,
				adreno_dev->gpucore->lm_major,
				adreno_dev->gpucore->lm_minor);
			if (ret)
				goto err;

			adreno_dev->lm_fw = fw;

			if (block[2] > (block_size - 2))
				goto err;
			adreno_dev->lm_sequence = block + block[2] + 3;
			adreno_dev->lm_size = block_size - block[2] - 2;
		}
		block_total += (block_size + 1);
		block += (block_size + 1);
	}
	if (adreno_dev->lm_sequence)
		return;

err:
	release_firmware(fw);
	KGSL_PWR_ERR(device,
		"Register file failed to load sz=%d bsz=%llu header=%d\n",
		fw_size, block_size, ret);
}

static int _execute_reg_sequence(struct adreno_device *adreno_dev,
		uint32_t *opcode, uint32_t length)
{
	uint32_t *cur = opcode;
	uint64_t reg, val;

	/* todo double check the reg writes */
	while ((cur - opcode) < length) {
		if (cur[0] == 1 && (length - (cur - opcode) >= 4)) {
			/* Write a 32 bit value to a 64 bit reg */
			reg = cur[2];
			reg = (reg << 32) | cur[1];
			kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, cur[3]);
			cur += 4;
		} else if (cur[0] == 2 && (length - (cur - opcode) >= 5)) {
			/* Write a 64 bit value to a 64 bit reg */
			reg = cur[2];
			reg = (reg << 32) | cur[1];
			val = cur[4];
			val = (val << 32) | cur[3];
			kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, val);
			cur += 5;
		} else if (cur[0] == 3 && (length - (cur - opcode) >= 2)) {
			/* Delay for X usec */
			udelay(cur[1]);
			cur += 2;
		} else
			return -EINVAL;
	}
	return 0;
}
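
/*
 * Opcode encodings consumed above, summarized (derived directly from
 * the branches in _execute_reg_sequence()):
 *
 *	op 1: { 1, addr_lo, addr_hi, value }		4 dwords
 *	op 2: { 2, addr_lo, addr_hi, val_lo, val_hi }	5 dwords
 *	op 3: { 3, usec }				2 dwords
 *
 * For example, the (hypothetical) stream { 3, 10, 1, 0x1F, 0x0, 0x55 }
 * delays for 10 usec and then writes 0x55 to register offset 0x1F.
 */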

static uint32_t _write_voltage_table(struct adreno_device *adreno_dev,
		unsigned int addr)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;
	struct dev_pm_opp *opp;
	int levels = pwr->num_pwrlevels - 1;
	unsigned int mvolt = 0;

	kgsl_regwrite(device, addr++, adreno_dev->gpucore->max_power);
	kgsl_regwrite(device, addr++, levels);

	/* Write voltage in mV and frequency in MHz */
	for (i = 0; i < levels; i++) {
		opp = dev_pm_opp_find_freq_exact(&device->pdev->dev,
			pwr->pwrlevels[i].gpu_freq, true);
		/* _opp_get returns uV, convert to mV */
		if (!IS_ERR(opp))
			mvolt = dev_pm_opp_get_voltage(opp) / 1000;
		kgsl_regwrite(device, addr++, mvolt);
		kgsl_regwrite(device, addr++,
			pwr->pwrlevels[i].gpu_freq / 1000000);
	}
	return (levels * 2 + 2);
}
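
/*
 * Resulting AGC payload layout (voltages and frequencies hypothetical):
 * for two power levels, say 600 MHz at 900 mV and 300 MHz at 800 mV,
 * the dwords written starting at 'addr' are:
 *
 *	{ max_power, 2, 900, 600, 800, 300 }
 *
 * and the function returns levels * 2 + 2 = 6, which a530_lm_init()
 * multiplies by sizeof(uint32_t) to program AGC_MSG_PAYLOAD_SIZE.
 */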

static uint32_t lm_limit(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (adreno_dev->lm_limit)
		return adreno_dev->lm_limit;

	if (of_property_read_u32(device->pdev->dev.of_node, "qcom,lm-limit",
		&adreno_dev->lm_limit))
		adreno_dev->lm_limit = LM_DEFAULT_LIMIT;

	return adreno_dev->lm_limit;
}

/*
 * a530_lm_init() - Initialize LM/DPM on the GPMU
 * @adreno_dev: The adreno device pointer
 */
static void a530_lm_init(struct adreno_device *adreno_dev)
{
	uint32_t length;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
		!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
		return;

	/* If something was wrong with the sequence file, return */
	if (adreno_dev->lm_sequence == NULL)
		return;

	/* Write LM registers including DPM ucode, coefficients, and config */
	if (_execute_reg_sequence(adreno_dev, adreno_dev->lm_sequence,
		adreno_dev->lm_size)) {
		/* If the sequence is invalid, it's not getting better */
		adreno_dev->lm_sequence = NULL;
		KGSL_PWR_WARN(device, "Invalid LM sequence\n");
		return;
	}

	kgsl_regwrite(device, A5XX_GPMU_TEMP_SENSOR_ID,
		adreno_dev->gpucore->gpmu_tsens);
	kgsl_regwrite(device, A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x1);
	kgsl_regwrite(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x1);

	kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE,
		(0x80000000 | device->pwrctrl.active_pwrlevel));
	/* use the leakage to set this value at runtime */
	kgsl_regwrite(device, A5XX_GPMU_BASE_LEAKAGE,
		adreno_dev->lm_leakage);

	/* Enable the power threshold and set the limit (6000 mW by default) */
	kgsl_regwrite(device, A5XX_GPMU_GPMU_PWR_THRESHOLD,
		0x80000000 | lm_limit(adreno_dev));

	kgsl_regwrite(device, A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	kgsl_regwrite(device, A5XX_GDPM_CONFIG1, 0x00201FF1);

	/* Send an initial message to the GPMU with the LM voltage table */
	kgsl_regwrite(device, AGC_MSG_STATE, 1);
	kgsl_regwrite(device, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
	length = _write_voltage_table(adreno_dev, AGC_MSG_PAYLOAD);
	kgsl_regwrite(device, AGC_MSG_PAYLOAD_SIZE, length * sizeof(uint32_t));
	kgsl_regwrite(device, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}

/*
 * a530_lm_enable() - Enable the LM/DPM feature on the GPMU
 * @adreno_dev: The adreno device pointer
 */
static void a530_lm_enable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
		!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
		return;

	/* If no sequence was properly initialized, return */
	if (adreno_dev->lm_sequence == NULL)
		return;

	kgsl_regwrite(device, A5XX_GDPM_INT_MASK, 0x00000000);
	kgsl_regwrite(device, A5XX_GDPM_INT_EN, 0x0000000A);
	kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x00000001);
	kgsl_regwrite(device, A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK,
		0x00050000);
	kgsl_regwrite(device, A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL,
		0x00030000);

	if (adreno_is_a530(adreno_dev))
		/* Program throttle control, do not enable idle DCS on v3+ */
		kgsl_regwrite(device, A5XX_GPMU_CLOCK_THROTTLE_CTRL,
			adreno_is_a530v2(adreno_dev) ? 0x00060011 : 0x00000011);
}

static void a540_lm_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	uint32_t agc_lm_config = AGC_BCL_DISABLED |
		((ADRENO_CHIPID_PATCH(adreno_dev->chipid) & 0x3)
		<< AGC_GPU_VERSION_SHIFT);
	unsigned int r;

	if (!test_bit(ADRENO_THROTTLING_CTRL, &adreno_dev->pwrctrl_flag))
		agc_lm_config |= AGC_THROTTLE_DISABLE;

	if (lm_on(adreno_dev)) {
		agc_lm_config |=
			AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE |
			AGC_LM_CONFIG_ISENSE_ENABLE;

		kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r);

		if ((r & GPMU_ISENSE_STATUS) == GPMU_ISENSE_END_POINT_CAL_ERR) {
			KGSL_CORE_ERR(
				"GPMU: ISENSE end point calibration failure\n");
			agc_lm_config |= AGC_LM_CONFIG_ENABLE_ERROR;
		}
	}

	kgsl_regwrite(device, AGC_MSG_STATE, 0x80000001);
	kgsl_regwrite(device, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
	(void) _write_voltage_table(adreno_dev, AGC_MSG_PAYLOAD);
	kgsl_regwrite(device, AGC_MSG_PAYLOAD + AGC_LM_CONFIG, agc_lm_config);
	kgsl_regwrite(device, AGC_MSG_PAYLOAD + AGC_LEVEL_CONFIG,
		(unsigned int) ~(GENMASK(LM_DCVS_LIMIT, 0) |
			GENMASK(16 + LM_DCVS_LIMIT, 16)));

	kgsl_regwrite(device, AGC_MSG_PAYLOAD_SIZE,
		(AGC_LEVEL_CONFIG + 1) * sizeof(uint32_t));
	kgsl_regwrite(device, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);

	kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE,
		(0x80000000 | device->pwrctrl.active_pwrlevel));

	kgsl_regwrite(device, A5XX_GPMU_GPMU_PWR_THRESHOLD,
		PWR_THRESHOLD_VALID | lm_limit(adreno_dev));

	kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK,
		VOLTAGE_INTR_EN);
}
1572
1573
1574static void a5xx_lm_enable(struct adreno_device *adreno_dev)
1575{
1576 if (adreno_is_a530(adreno_dev))
1577 a530_lm_enable(adreno_dev);
1578}
1579
1580static void a5xx_lm_init(struct adreno_device *adreno_dev)
1581{
1582 if (adreno_is_a530(adreno_dev))
1583 a530_lm_init(adreno_dev);
1584 else if (adreno_is_a540(adreno_dev))
1585 a540_lm_init(adreno_dev);
1586}
1587
1588static int gpmu_set_level(struct adreno_device *adreno_dev, unsigned int val)
1589{
1590 unsigned int reg;
1591 int retry = 100;
1592
1593 kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_GPMU_GPMU_VOLTAGE, val);
1594
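	/* The GPMU clears bit 31 once it has consumed the vote; poll for it */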
1595 do {
1596 kgsl_regread(KGSL_DEVICE(adreno_dev), A5XX_GPMU_GPMU_VOLTAGE,
1597 &reg);
1598 } while ((reg & 0x80000000) && retry--);
1599
1600 return (reg & 0x80000000) ? -ETIMEDOUT : 0;
1601}
1602
1603/*
1604 * a5xx_pwrlevel_change_settings() - Program the hardware during power level
1605 * transitions
1606 * @adreno_dev: The adreno device pointer
1607 * @prelevel: The previous power level
1608 * @postlevel: The new power level
1609 * @post: True if called after the clock change has taken effect
1610 */
1611static void a5xx_pwrlevel_change_settings(struct adreno_device *adreno_dev,
1612 unsigned int prelevel, unsigned int postlevel,
1613 bool post)
1614{
1615 int on = 0;
1616
1617 /*
1618	 * On pre-A540 HW, only call through if PPD or LM
1619	 * is supported and enabled
1620 */
1621 if (ADRENO_FEATURE(adreno_dev, ADRENO_PPD) &&
1622 test_bit(ADRENO_PPD_CTRL, &adreno_dev->pwrctrl_flag))
1623 on = ADRENO_PPD;
1624
1625 if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
1626 test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
1627 on = ADRENO_LM;
1628
1629 /* On 540+ HW call through unconditionally as long as GPMU is enabled */
1630 if (ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
1631 if (adreno_is_a540(adreno_dev))
1632 on = ADRENO_GPMU;
1633 }
1634
1635 if (!on)
1636 return;
1637
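	/*
	 * Bit 31 posts the vote to the GPMU; the extra 0x10 in the
	 * pre-switch write presumably tags it as a pre-switch vote.
	 */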
1638 if (post == 0) {
1639 if (gpmu_set_level(adreno_dev, (0x80000010 | postlevel)))
1640 KGSL_CORE_ERR(
1641 "GPMU pre powerlevel did not stabilize\n");
1642 } else {
1643 if (gpmu_set_level(adreno_dev, (0x80000000 | postlevel)))
1644 KGSL_CORE_ERR(
1645 "GPMU post powerlevel did not stabilize\n");
1646 }
1647}
1648
1649static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
Deepak Kumara309e0e2017-03-17 17:27:42 +05301650 const char *name, struct clk *clk, bool on)
Shrenuj Bansala419c792016-10-20 14:05:11 -07001651{
Deepak Kumara309e0e2017-03-17 17:27:42 +05301652
1653 if (!adreno_is_a540(adreno_dev) && !adreno_is_a512(adreno_dev) &&
1654 !adreno_is_a508(adreno_dev))
1655 return;
1656
Rajesh Kemisettibfd1eb92017-01-26 18:08:16 +05301657 /* Handle clock settings for GFX PSCBCs */
Deepak Kumara309e0e2017-03-17 17:27:42 +05301658 if (on) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07001659 if (!strcmp(name, "mem_iface_clk")) {
1660 clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
1661 clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
1662 } else if (!strcmp(name, "core_clk")) {
1663 clk_set_flags(clk, CLKFLAG_RETAIN_PERIPH);
1664 clk_set_flags(clk, CLKFLAG_RETAIN_MEM);
1665 }
Deepak Kumara309e0e2017-03-17 17:27:42 +05301666 } else {
1667 if (!strcmp(name, "core_clk")) {
1668 clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
1669 clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
1670 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07001671 }
1672}
1673
1674static void a5xx_count_throttles(struct adreno_device *adreno_dev,
1675 uint64_t adj)
1676{
1677 if (adreno_is_a530(adreno_dev))
1678 kgsl_regread(KGSL_DEVICE(adreno_dev),
1679 adreno_dev->lm_threshold_count,
1680 &adreno_dev->lm_threshold_cross);
1681 else if (adreno_is_a540(adreno_dev))
1682 adreno_dev->lm_threshold_cross = adj;
1683}
1684
1685static int a5xx_enable_pwr_counters(struct adreno_device *adreno_dev,
1686 unsigned int counter)
1687{
1688 /*
1689 * On 5XX we have to emulate the PWR counters which are physically
1690 * missing. Program countable 6 on RBBM_PERFCTR_RBBM_0 as a substitute
1691 * for PWR:1. Don't emulate PWR:0 as nobody uses it and we don't want
1692 * to take away too many of the generic RBBM counters.
1693 */
1694
1695 if (counter == 0)
1696 return -EINVAL;
1697
1698 kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
1699
1700 return 0;
1701}
1702
1703/* number of cycles of FW-driven idle throttling (10%) */
1704#define IDLE_10PCT 0
1705/* number of cycles when clock is throttled by 50% (CRC) */
1706#define CRC_50PCT 1
1707/* number of cycles when clock is throttled by more than 50% (CRC) */
1708#define CRC_MORE50PCT 2
1709/* number of cycles when clock is throttled by less than 50% (CRC) */
1710#define CRC_LESS50PCT 3
1711
1712static uint64_t a5xx_read_throttling_counters(struct adreno_device *adreno_dev)
1713{
1714 int i, adj;
1715 uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS];
1716 struct adreno_busy_data *busy = &adreno_dev->busy_data;
1717
1718 if (!adreno_is_a540(adreno_dev))
1719 return 0;
1720
1721 if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
1722 return 0;
1723
1724 if (!test_bit(ADRENO_THROTTLING_CTRL, &adreno_dev->pwrctrl_flag))
1725 return 0;
1726
1727 for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
1728 if (!adreno_dev->gpmu_throttle_counters[i])
1729 return 0;
1730
1731 th[i] = counter_delta(KGSL_DEVICE(adreno_dev),
1732 adreno_dev->gpmu_throttle_counters[i],
1733 &busy->throttle_cycles[i]);
1734 }
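	/*
	 * Weight the counters into one busy-cycle adjustment: cycles
	 * throttled ~50% count once, cycles throttled <50% count 1/3,
	 * and cycles throttled >50% (beyond the FW idle throttle)
	 * count three times.
	 */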
1735 adj = th[CRC_MORE50PCT] - th[IDLE_10PCT];
1736 adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 0 : adj) * 3;
1737
1738 trace_kgsl_clock_throttling(
1739 th[IDLE_10PCT], th[CRC_50PCT],
1740 th[CRC_MORE50PCT], th[CRC_LESS50PCT],
1741 adj);
1742 return adj;
1743}
1744
1745static void a5xx_enable_64bit(struct adreno_device *adreno_dev)
1746{
1747 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1748
1749 kgsl_regwrite(device, A5XX_CP_ADDR_MODE_CNTL, 0x1);
1750 kgsl_regwrite(device, A5XX_VSC_ADDR_MODE_CNTL, 0x1);
1751 kgsl_regwrite(device, A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
1752 kgsl_regwrite(device, A5XX_RB_ADDR_MODE_CNTL, 0x1);
1753 kgsl_regwrite(device, A5XX_PC_ADDR_MODE_CNTL, 0x1);
1754 kgsl_regwrite(device, A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
1755 kgsl_regwrite(device, A5XX_VFD_ADDR_MODE_CNTL, 0x1);
1756 kgsl_regwrite(device, A5XX_VPC_ADDR_MODE_CNTL, 0x1);
1757 kgsl_regwrite(device, A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
1758 kgsl_regwrite(device, A5XX_SP_ADDR_MODE_CNTL, 0x1);
1759 kgsl_regwrite(device, A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
1760 kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
1761}
1762
1763/*
1764 * a5xx_gpmu_reset() - Re-enable GPMU based power features and restart GPMU
1765 * @work: Pointer to the work struct for gpmu reset
1766 *
1767 * Load the GPMU microcode, set up any features such as hardware clock gating
1768 * or IFPC, and take the GPMU out of reset.
1769 */
1770static void a5xx_gpmu_reset(struct work_struct *work)
1771{
1772 struct adreno_device *adreno_dev = container_of(work,
1773 struct adreno_device, gpmu_work);
1774 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1775
1776 if (test_bit(ADRENO_DEVICE_GPMU_INITIALIZED, &adreno_dev->priv))
1777 return;
1778
1779 /*
1780	 * If the GPMU has already been restarted, or a restart is already
1781	 * in progress after a watchdog timeout, there is no need to reset
1782	 * the GPMU again.
1783 */
1784 if (device->state != KGSL_STATE_NAP &&
1785 device->state != KGSL_STATE_AWARE &&
1786 device->state != KGSL_STATE_ACTIVE)
1787 return;
1788
1789 mutex_lock(&device->mutex);
1790
1791 if (device->state == KGSL_STATE_NAP)
1792 kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
1793
1794 if (a5xx_regulator_enable(adreno_dev))
1795 goto out;
1796
1797 /* Soft reset of the GPMU block */
1798 kgsl_regwrite(device, A5XX_RBBM_BLOCK_SW_RESET_CMD, BIT(16));
1799
1800 /* GPU comes up in secured mode, make it unsecured by default */
1801 if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
1802 kgsl_regwrite(device, A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
1803
1804
1805 a5xx_gpmu_init(adreno_dev);
1806
1807out:
1808 mutex_unlock(&device->mutex);
1809}
1810
1811static void _setup_throttling_counters(struct adreno_device *adreno_dev)
1812{
1813 int i, ret;
1814
1815 if (!adreno_is_a540(adreno_dev))
1816 return;
1817
1818 if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
1819 return;
1820
1821 for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
1822		/* reset throttled cycles value */
1823 adreno_dev->busy_data.throttle_cycles[i] = 0;
1824
1825 if (adreno_dev->gpmu_throttle_counters[i] != 0)
1826 continue;
1827 ret = adreno_perfcounter_get(adreno_dev,
1828 KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
1829 ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i,
1830 &adreno_dev->gpmu_throttle_counters[i],
1831 NULL,
1832 PERFCOUNTER_FLAG_KERNEL);
1833 WARN_ONCE(ret, "Unable to get clock throttling counter %x\n",
1834 ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i);
1835 }
1836}
1837
1838/*
1839 * a5xx_start() - Device start
1840 * @adreno_dev: Pointer to adreno device
1841 *
1842 * a5xx device start
1843 */
1844static void a5xx_start(struct adreno_device *adreno_dev)
1845{
1846 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1847 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1848 unsigned int bit;
1849 int ret;
1850
1851 if (adreno_is_a530(adreno_dev) && ADRENO_FEATURE(adreno_dev, ADRENO_LM)
1852 && adreno_dev->lm_threshold_count == 0) {
1853
1854 ret = adreno_perfcounter_get(adreno_dev,
1855 KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 27,
1856 &adreno_dev->lm_threshold_count, NULL,
1857 PERFCOUNTER_FLAG_KERNEL);
1858		/* Ignore non-critical failure - counter is only used for debugfs */
1859 if (ret)
1860 adreno_dev->lm_threshold_count = 0;
1861 }
1862
1863 _setup_throttling_counters(adreno_dev);
1864
1865 adreno_vbif_start(adreno_dev, a5xx_vbif_platforms,
1866 ARRAY_SIZE(a5xx_vbif_platforms));
1867
1868 /* Make all blocks contribute to the GPU BUSY perf counter */
1869 kgsl_regwrite(device, A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
1870
1871 /*
1872 * Enable the RBBM error reporting bits. This lets us get
1873 * useful information on failure
1874 */
1875 kgsl_regwrite(device, A5XX_RBBM_AHB_CNTL0, 0x00000001);
1876
1877 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_FAULT_DETECT_MASK)) {
1878 /*
1879 * We have 4 RB units, and only RB0 activity signals are
1880 * working correctly. Mask out RB1-3 activity signals
1881 * from the HW hang detection logic as per
1882 * recommendation of hardware team.
1883 */
1884 kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
1885 0xF0000000);
1886 kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
1887 0xFFFFFFFF);
1888 kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
1889 0xFFFFFFFF);
1890 kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
1891 0xFFFFFFFF);
1892 kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
1893 0xFFFFFFFF);
1894 kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
1895 0xFFFFFFFF);
1896 kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
1897 0xFFFFFFFF);
1898 kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
1899 0xFFFFFFFF);
1900 }
1901
1902 /*
1903 * Turn on hang detection for a530 v2 and beyond. This spews a
1904 * lot of useful information into the RBBM registers on a hang.
1905 */
1906 if (!adreno_is_a530v1(adreno_dev)) {
1907
1908 set_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
1909 gpudev->irq->mask |= (1 << A5XX_INT_MISC_HANG_DETECT);
1910 /*
1911 * Set hang detection threshold to 4 million cycles
1912 * (0x3FFFF*16)
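		 * i.e. 262,143 steps of 16 cycles = ~4.19 million cycles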
1913 */
1914 kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
1915 (1 << 30) | 0x3FFFF);
1916 }
1917
1918
1919 /* Turn on performance counters */
1920 kgsl_regwrite(device, A5XX_RBBM_PERFCTR_CNTL, 0x01);
1921
1922 /*
1923 * This is to increase performance by restricting VFD's cache access,
1924 * so that LRZ and other data get evicted less.
1925 */
1926 kgsl_regwrite(device, A5XX_UCHE_CACHE_WAYS, 0x02);
1927
1928 /*
1929 * Set UCHE_WRITE_THRU_BASE to the UCHE_TRAP_BASE effectively
1930 * disabling L2 bypass
1931 */
1932 kgsl_regwrite(device, A5XX_UCHE_TRAP_BASE_LO, 0xffff0000);
1933 kgsl_regwrite(device, A5XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
1934 kgsl_regwrite(device, A5XX_UCHE_WRITE_THRU_BASE_LO, 0xffff0000);
1935 kgsl_regwrite(device, A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
1936
1937 /* Program the GMEM VA range for the UCHE path */
1938 kgsl_regwrite(device, A5XX_UCHE_GMEM_RANGE_MIN_LO,
1939 ADRENO_UCHE_GMEM_BASE);
1940 kgsl_regwrite(device, A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
1941 kgsl_regwrite(device, A5XX_UCHE_GMEM_RANGE_MAX_LO,
1942 ADRENO_UCHE_GMEM_BASE +
1943 adreno_dev->gmem_size - 1);
1944 kgsl_regwrite(device, A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);
1945
1946 /*
1947	 * The CP registers below default to 0x0; program initial
1948	 * values based on the a5xx flavor.
1949 */
Hareesh Gundu0891bd92018-04-12 14:46:08 +05301950 if (adreno_is_a504_to_a506(adreno_dev) || adreno_is_a508(adreno_dev)) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07001951 kgsl_regwrite(device, A5XX_CP_MEQ_THRESHOLDS, 0x20);
1952 kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x400);
1953 kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
1954 kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
1955 } else if (adreno_is_a510(adreno_dev)) {
1956 kgsl_regwrite(device, A5XX_CP_MEQ_THRESHOLDS, 0x20);
1957 kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x20);
1958 kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
1959 kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
Rajesh Kemisetti29dd66c2017-01-15 18:59:29 +05301960 } else if (adreno_is_a540(adreno_dev) || adreno_is_a512(adreno_dev)) {
1961 kgsl_regwrite(device, A5XX_CP_MEQ_THRESHOLDS, 0x40);
1962 kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x400);
1963 kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
1964 kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001965 } else {
1966 kgsl_regwrite(device, A5XX_CP_MEQ_THRESHOLDS, 0x40);
1967 kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x40);
1968 kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
1969 kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
1970 }
1971
1972 /*
1973	 * The vtxFifo and primFifo threshold defaults differ
1974	 * across a5xx flavors.
1975 */
Hareesh Gundu0891bd92018-04-12 14:46:08 +05301976 if (adreno_is_a504_to_a506(adreno_dev) || adreno_is_a508(adreno_dev))
Shrenuj Bansala419c792016-10-20 14:05:11 -07001977 kgsl_regwrite(device, A5XX_PC_DBG_ECO_CNTL,
1978 (0x100 << 11 | 0x100 << 22));
1979 else if (adreno_is_a510(adreno_dev) || adreno_is_a512(adreno_dev))
1980 kgsl_regwrite(device, A5XX_PC_DBG_ECO_CNTL,
1981 (0x200 << 11 | 0x200 << 22));
1982 else
1983 kgsl_regwrite(device, A5XX_PC_DBG_ECO_CNTL,
1984 (0x400 << 11 | 0x300 << 22));
1985
1986 /*
1987	 * A5x USP LDST: a non-valid pixel can wrongly update the read
1988	 * combine offset. A5xx added a read-combine optimization, but there
1989	 * are cases on a530v1 where no pixel is valid yet the active mask is
1990	 * not cleared, so the offset can be wrongly updated if the invalid
1991	 * address can be combined. The wrongly latched value shifts the
1992	 * returned data to the wrong offset. Work around this by disabling
1993	 * LD combine: set bit[25] of SP_DBG_ECO_CNTL (SP chicken bit[17])
1994	 * to 1; the default is 0 (enabled).
1995 */
1996 if (adreno_is_a530v1(adreno_dev))
1997 kgsl_regrmw(device, A5XX_SP_DBG_ECO_CNTL, 0, (1 << 25));
1998
1999 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI)) {
2000 /*
2001 * Set TWOPASSUSEWFI in A5XX_PC_DBG_ECO_CNTL for
2002		 * microcode versions v77 and newer
2003 */
2004 if ((adreno_compare_pfp_version(adreno_dev, 0x5FF077) >= 0))
2005 kgsl_regrmw(device, A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
2006 }
2007
2008 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING)) {
2009 /*
2010		 * Disable the RB sampler datapath DP2 clock gating
2011		 * optimization for 1-SP GPUs; it is enabled by default.
2012 */
2013 kgsl_regrmw(device, A5XX_RB_DBG_ECO_CNT, 0, (1 << 9));
2014 }
2015 /*
2016 * Disable UCHE global filter as SP can invalidate/flush
2017 * independently
2018 */
2019 kgsl_regwrite(device, A5XX_UCHE_MODE_CNTL, BIT(29));
2020 /* Set the USE_RETENTION_FLOPS chicken bit */
2021 kgsl_regwrite(device, A5XX_CP_CHICKEN_DBG, 0x02000000);
2022
2023 /* Enable ISDB mode if requested */
2024 if (test_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv)) {
2025 if (!kgsl_active_count_get(device)) {
2026 /*
2027 * Disable ME/PFP split timeouts when the debugger is
2028 * enabled because the CP doesn't know when a shader is
2029 * in active debug
2030 */
2031 kgsl_regwrite(device, A5XX_RBBM_AHB_CNTL1, 0x06FFFFFF);
2032
2033 /* Force the SP0/SP1 clocks on to enable ISDB */
2034 kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL_SP0, 0x0);
2035 kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL_SP1, 0x0);
2036 kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL_SP2, 0x0);
2037 kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL_SP3, 0x0);
2038 kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL2_SP0, 0x0);
2039 kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL2_SP1, 0x0);
2040 kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL2_SP2, 0x0);
2041 kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL2_SP3, 0x0);
2042
2043 /* disable HWCG */
2044 kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL, 0x0);
2045 kgsl_regwrite(device, A5XX_RBBM_ISDB_CNT, 0x0);
2046 } else
2047 KGSL_CORE_ERR(
2048 "Active count failed while turning on ISDB.");
2049 } else {
2050 /* if not in ISDB mode enable ME/PFP split notification */
2051 kgsl_regwrite(device, A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
2052 }
2053
2054 kgsl_regwrite(device, A5XX_RBBM_AHB_CNTL2, 0x0000003F);
2055
2056 if (!of_property_read_u32(device->pdev->dev.of_node,
2057 "qcom,highest-bank-bit", &bit)) {
2058 if (bit >= 13 && bit <= 16) {
2059 bit = (bit - 13) & 0x03;
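			/* e.g. a DT value of 15 encodes as (15 - 13) & 3 = 2 */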
2060
2061 /*
2062 * Program the highest DDR bank bit that was passed in
2063 * from the DT in a handful of registers. Some of these
2064 * registers will also be written by the UMD, but we
2065 * want to program them in case we happen to use the
2066 * UCHE before the UMD does
2067 */
2068
2069 kgsl_regwrite(device, A5XX_TPL1_MODE_CNTL, bit << 7);
2070 kgsl_regwrite(device, A5XX_RB_MODE_CNTL, bit << 1);
2071 if (adreno_is_a540(adreno_dev) ||
2072 adreno_is_a512(adreno_dev))
2073 kgsl_regwrite(device, A5XX_UCHE_DBG_ECO_CNTL_2,
2074 bit);
2075 }
2076
2077 }
2078
Hareesh Gundu41582a12017-07-25 13:03:27 +05302079	/* Disable all flat shading optimization */
2080 kgsl_regrmw(device, A5XX_VPC_DBG_ECO_CNTL, 0, 0x1 << 10);
2081
Shrenuj Bansala419c792016-10-20 14:05:11 -07002082 /*
2083	 * A VPC corner case with local memory load/kill leads to corrupt
2084	 * internal state. The normal disable does not work for all a5x
2085	 * chips, so use the following settings to disable it.
2086 */
2087 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_DISABLE_LMLOADKILL)) {
2088 kgsl_regrmw(device, A5XX_VPC_DBG_ECO_CNTL, 0, 0x1 << 23);
2089 kgsl_regrmw(device, A5XX_HLSQ_DBG_ECO_CNTL, 0x1 << 18, 0);
2090 }
2091
2092 a5xx_preemption_start(adreno_dev);
2093 a5xx_protect_init(adreno_dev);
2094}
2095
2096/*
2097 * Follow the ME_INIT sequence with a preemption yield to allow the GPU to move
2098 * to a different ringbuffer, if desired
2099 */
2100static int _preemption_init(
2101 struct adreno_device *adreno_dev,
2102 struct adreno_ringbuffer *rb, unsigned int *cmds,
2103 struct kgsl_context *context)
2104{
2105 unsigned int *cmds_orig = cmds;
2106 uint64_t gpuaddr = rb->preemption_desc.gpuaddr;
2107
2108 /* Turn CP protection OFF */
2109 *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
2110 *cmds++ = 0;
2111 /*
2112	 * During a context switch the CP saves the switch state to the
2113	 * a5xx_cp_preemption_record pointed to by CONTEXT_SWITCH_SAVE_ADDR
2114 */
2115 *cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 1);
2116 *cmds++ = lower_32_bits(gpuaddr);
2117 *cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI, 1);
2118 *cmds++ = upper_32_bits(gpuaddr);
2119
2120 /* Turn CP protection ON */
2121 *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
2122 *cmds++ = 1;
2123
2124 *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_GLOBAL, 1);
2125 *cmds++ = 0;
2126
2127 *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_LOCAL, 1);
2128 *cmds++ = 1;
2129
2130 /* Enable yield in RB only */
2131 *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
2132 *cmds++ = 1;
2133
2134 *cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
2135 cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
2136 *cmds++ = 0;
2137 /* generate interrupt on preemption completion */
2138 *cmds++ = 1;
2139
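	/* Return the number of dwords written so the caller can advance */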
2140 return cmds - cmds_orig;
2141}
2142
2143static int a5xx_post_start(struct adreno_device *adreno_dev)
2144{
2145 int ret;
2146 unsigned int *cmds, *start;
2147 struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
2148
2149 if (!adreno_is_a530(adreno_dev) &&
2150 !adreno_is_preemption_enabled(adreno_dev))
2151 return 0;
2152
2153 cmds = adreno_ringbuffer_allocspace(rb, 42);
2154 if (IS_ERR(cmds)) {
2155 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2156
2157		KGSL_DRV_ERR(device, "error allocating preemption init cmds");
2158 return PTR_ERR(cmds);
2159 }
2160 start = cmds;
2161
2162 /*
2163 * Send a pipeline stat event whenever the GPU gets powered up
2164 * to cause misbehaving perf counters to start ticking
2165 */
2166 if (adreno_is_a530(adreno_dev)) {
2167 *cmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
2168 *cmds++ = 0xF;
2169 }
2170
2171 if (adreno_is_preemption_enabled(adreno_dev))
2172 cmds += _preemption_init(adreno_dev, rb, cmds, NULL);
2173
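	/* Give back the unused portion of the 42 reserved dwords */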
2174 rb->_wptr = rb->_wptr - (42 - (cmds - start));
2175
2176 ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
2177 if (ret)
Carter Cooper8567af02017-03-15 14:22:03 -06002178 adreno_spin_idle_debug(adreno_dev,
Shrenuj Bansala419c792016-10-20 14:05:11 -07002179 "hw initialization failed to idle\n");
2180
2181 return ret;
2182}
2183
2184static int a5xx_gpmu_init(struct adreno_device *adreno_dev)
2185{
2186 int ret;
2187
2188 /* Set up LM before initializing the GPMU */
2189 a5xx_lm_init(adreno_dev);
2190
2191 /* Enable SPTP based power collapse before enabling GPMU */
2192 a5xx_enable_pc(adreno_dev);
2193
2194 ret = a5xx_gpmu_start(adreno_dev);
2195 if (ret)
2196 return ret;
2197
2198 /* Enable limits management */
2199 a5xx_lm_enable(adreno_dev);
2200 return 0;
2201}
2202
Shrenuj Bansala419c792016-10-20 14:05:11 -07002203/*
2204 * a5xx_microcode_load() - Load microcode
2205 * @adreno_dev: Pointer to adreno device
2206 */
2207static int a5xx_microcode_load(struct adreno_device *adreno_dev)
2208{
2209 void *ptr;
2210 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002211 struct adreno_firmware *pm4_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PM4);
2212 struct adreno_firmware *pfp_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PFP);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002213 uint64_t gpuaddr;
Rajesh Kemisetti564ede42018-05-02 15:32:38 +05302214 int ret = 0, zap_retry = 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002215
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002216 gpuaddr = pm4_fw->memdesc.gpuaddr;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002217 kgsl_regwrite(device, A5XX_CP_PM4_INSTR_BASE_LO,
2218 lower_32_bits(gpuaddr));
2219 kgsl_regwrite(device, A5XX_CP_PM4_INSTR_BASE_HI,
2220 upper_32_bits(gpuaddr));
2221
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002222 gpuaddr = pfp_fw->memdesc.gpuaddr;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002223 kgsl_regwrite(device, A5XX_CP_PFP_INSTR_BASE_LO,
2224 lower_32_bits(gpuaddr));
2225 kgsl_regwrite(device, A5XX_CP_PFP_INSTR_BASE_HI,
2226 upper_32_bits(gpuaddr));
2227
2228 /*
Sunil Khatri3e0fd3e2018-04-12 18:00:07 +05302229	 * Do not attempt to load the zap shader if the MMU does
2230	 * not support secure mode.
2231 */
2232 if (!device->mmu.secured)
2233 return 0;
2234
2235 /*
Shrenuj Bansala419c792016-10-20 14:05:11 -07002236	 * Make a resume call to write the zap shader base address into
2237	 * the appropriate register; skip it if retention is supported for
2238	 * the CPZ register.
2239 */
Harshdeep Dhatta9e0d762017-05-10 14:16:42 -06002240 if (adreno_dev->zap_loaded && !(ADRENO_FEATURE(adreno_dev,
Shrenuj Bansala419c792016-10-20 14:05:11 -07002241 ADRENO_CPZ_RETENTION))) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07002242 struct scm_desc desc = {0};
2243
2244 desc.args[0] = 0;
2245 desc.args[1] = 13;
2246 desc.arginfo = SCM_ARGS(2);
2247
2248 ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, 0xA), &desc);
2249 if (ret) {
2250 pr_err("SCM resume call failed with error %d\n", ret);
2251 return ret;
2252 }
2253
2254 }
2255
2256 /* Load the zap shader firmware through PIL if its available */
Harshdeep Dhatta9e0d762017-05-10 14:16:42 -06002257 if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
Rajesh Kemisetti564ede42018-05-02 15:32:38 +05302258 /*
2259 * subsystem_get() may return -EAGAIN in case system is busy
2260 * and unable to load the firmware. So keep trying since this
2261 * is not a fatal error.
2262 */
2263 do {
2264 ret = 0;
2265 ptr = subsystem_get(adreno_dev->gpucore->zap_name);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002266
Rajesh Kemisetti564ede42018-05-02 15:32:38 +05302267 /* Return error if the zap shader cannot be loaded */
2268 if (IS_ERR_OR_NULL(ptr)) {
2269 ret = (ptr == NULL) ? -ENODEV : PTR_ERR(ptr);
2270 ptr = NULL;
2271 } else
2272 adreno_dev->zap_loaded = 1;
2273 } while ((ret == -EAGAIN) && (zap_retry++ < ZAP_RETRY_MAX));
Shrenuj Bansala419c792016-10-20 14:05:11 -07002274 }
2275
Rajesh Kemisetti564ede42018-05-02 15:32:38 +05302276 return ret;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002277}
2278
2279static int _me_init_ucode_workarounds(struct adreno_device *adreno_dev)
2280{
2281 switch (ADRENO_GPUREV(adreno_dev)) {
2282 case ADRENO_REV_A510:
2283 return 0x00000001; /* Ucode workaround for token end syncs */
Sunil Khatri670c7322018-06-22 11:37:29 +05302284 case ADRENO_REV_A504:
Shrenuj Bansala419c792016-10-20 14:05:11 -07002285 case ADRENO_REV_A505:
2286 case ADRENO_REV_A506:
2287 case ADRENO_REV_A530:
2288 /*
2289 * Ucode workarounds for token end syncs,
2290 * WFI after every direct-render 3D mode draw and
2291 * WFI after every 2D Mode 3 draw.
2292 */
2293 return 0x0000000B;
2294 case ADRENO_REV_A540:
2295 /*
2296 * WFI after every direct-render 3D mode draw and
2297 * WFI after every 2D Mode 3 draw. This is needed
2298 * only on a540v1.
2299 */
2300 if (adreno_is_a540v1(adreno_dev))
2301 return 0x0000000A;
2302 default:
2303 return 0x00000000; /* No ucode workarounds enabled */
2304 }
2305}
2306
2307/*
2308 * The CP_INIT_MAX_CONTEXT bit tells whether multiple hardware contexts
2309 * can be used at once or whether they should be serialized
2310 */
2311#define CP_INIT_MAX_CONTEXT BIT(0)
2312
2313/* Enables register protection mode */
2314#define CP_INIT_ERROR_DETECTION_CONTROL BIT(1)
2315
2316/* Header dump information */
2317#define CP_INIT_HEADER_DUMP BIT(2) /* Reserved */
2318
2319/* Default Reset states enabled for PFP and ME */
2320#define CP_INIT_DEFAULT_RESET_STATE BIT(3)
2321
2322/* Drawcall filter range */
2323#define CP_INIT_DRAWCALL_FILTER_RANGE BIT(4)
2324
2325/* Ucode workaround masks */
2326#define CP_INIT_UCODE_WORKAROUND_MASK BIT(5)
2327
2328#define CP_INIT_MASK (CP_INIT_MAX_CONTEXT | \
2329 CP_INIT_ERROR_DETECTION_CONTROL | \
2330 CP_INIT_HEADER_DUMP | \
2331 CP_INIT_DEFAULT_RESET_STATE | \
2332 CP_INIT_UCODE_WORKAROUND_MASK)
2333
2334static void _set_ordinals(struct adreno_device *adreno_dev,
2335 unsigned int *cmds, unsigned int count)
2336{
2337 unsigned int *start = cmds;
2338
2339 /* Enabled ordinal mask */
2340 *cmds++ = CP_INIT_MASK;
2341
2342 if (CP_INIT_MASK & CP_INIT_MAX_CONTEXT) {
2343 /*
2344 * Multiple HW ctxs are unreliable on a530v1,
2345 * use single hw context.
2346 * Use multiple contexts if bit set, otherwise serialize:
2347 * 3D (bit 0) 2D (bit 1)
2348 */
2349 if (adreno_is_a530v1(adreno_dev))
2350 *cmds++ = 0x00000000;
2351 else
2352 *cmds++ = 0x00000003;
2353 }
2354
2355 if (CP_INIT_MASK & CP_INIT_ERROR_DETECTION_CONTROL)
2356 *cmds++ = 0x20000000;
2357
2358 if (CP_INIT_MASK & CP_INIT_HEADER_DUMP) {
2359 /* Header dump address */
2360 *cmds++ = 0x00000000;
2361 /* Header dump enable and dump size */
2362 *cmds++ = 0x00000000;
2363 }
2364
2365 if (CP_INIT_MASK & CP_INIT_DRAWCALL_FILTER_RANGE) {
2366 /* Start range */
2367 *cmds++ = 0x00000000;
2368 /* End range (inclusive) */
2369 *cmds++ = 0x00000000;
2370 }
2371
2372 if (CP_INIT_MASK & CP_INIT_UCODE_WORKAROUND_MASK)
2373 *cmds++ = _me_init_ucode_workarounds(adreno_dev);
2374
2375 /* Pad rest of the cmds with 0's */
2376 while ((unsigned int)(cmds - start) < count)
2377 *cmds++ = 0x0;
2378}
2379
Carter Cooper1d8f5472017-03-15 15:01:09 -06002380int a5xx_critical_packet_submit(struct adreno_device *adreno_dev,
Shrenuj Bansala419c792016-10-20 14:05:11 -07002381 struct adreno_ringbuffer *rb)
2382{
2383 unsigned int *cmds;
2384 int ret;
2385
2386 if (!critical_packet_constructed)
2387 return 0;
2388
2389 cmds = adreno_ringbuffer_allocspace(rb, 4);
2390 if (IS_ERR(cmds))
2391 return PTR_ERR(cmds);
2392
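	/* IB packet: opcode, 64-bit buffer address, then length in dwords */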
2393 *cmds++ = cp_mem_packet(adreno_dev, CP_INDIRECT_BUFFER_PFE, 2, 1);
2394 cmds += cp_gpuaddr(adreno_dev, cmds, crit_pkts.gpuaddr);
2395 *cmds++ = crit_pkts_dwords;
2396
2397 ret = adreno_ringbuffer_submit_spin(rb, NULL, 20);
2398 if (ret)
Carter Cooper8567af02017-03-15 14:22:03 -06002399 adreno_spin_idle_debug(adreno_dev,
Shrenuj Bansala419c792016-10-20 14:05:11 -07002400 "Critical packet submission failed to idle\n");
2401
2402 return ret;
2403}
2404
2405/*
2406 * a5xx_send_me_init() - Initialize ringbuffer
2407 * @adreno_dev: Pointer to adreno device
2408 * @rb: Pointer to the ringbuffer of device
2409 *
2410 * Submit commands for ME initialization.
2411 */
2412static int a5xx_send_me_init(struct adreno_device *adreno_dev,
2413 struct adreno_ringbuffer *rb)
2414{
2415 unsigned int *cmds;
2416 int ret;
2417
2418 cmds = adreno_ringbuffer_allocspace(rb, 9);
2419 if (IS_ERR(cmds))
2420 return PTR_ERR(cmds);
2421 if (cmds == NULL)
2422 return -ENOSPC;
2423
2424 *cmds++ = cp_type7_packet(CP_ME_INIT, 8);
2425
2426 _set_ordinals(adreno_dev, cmds, 8);
2427
2428 ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
2429 if (ret)
Carter Cooper8567af02017-03-15 14:22:03 -06002430 adreno_spin_idle_debug(adreno_dev,
Shrenuj Bansala419c792016-10-20 14:05:11 -07002431 "CP initialization failed to idle\n");
2432
2433 return ret;
2434}
2435
Shrenuj Bansala419c792016-10-20 14:05:11 -07002436/*
2437 * a5xx_rb_start() - Start the ringbuffer
2438 * @adreno_dev: Pointer to adreno device
2439 * @start_type: Warm or cold start
2440 */
2441static int a5xx_rb_start(struct adreno_device *adreno_dev,
2442 unsigned int start_type)
2443{
2444 struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
2445 struct kgsl_device *device = &adreno_dev->dev;
2446 uint64_t addr;
2447 int ret;
2448
2449 addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);
2450
2451 adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
2452 ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);
2453
2454 /*
2455 * The size of the ringbuffer in the hardware is the log2
2456 * representation of the size in quadwords (sizedwords / 2).
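	 * For example, a (hypothetical) 32 KB ring is 8192 dwords, i.e.
	 * 4096 quadwords, so the programmed size field is log2(4096) = 12.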
2457 * Also disable the host RPTR shadow register as it might be unreliable
2458 * in certain circumstances.
2459 */
2460
2461 adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
2462 A5XX_CP_RB_CNTL_DEFAULT);
2463
Deepak Kumar756d6a92017-11-28 16:58:29 +05302464 adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
2465 ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002466
2467 ret = a5xx_microcode_load(adreno_dev);
2468 if (ret)
2469 return ret;
2470
2471 /* clear ME_HALT to start micro engine */
2472 adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, 0);
2473
2474 ret = a5xx_send_me_init(adreno_dev, rb);
2475 if (ret)
2476 return ret;
2477
2478 /* GPU comes up in secured mode, make it unsecured by default */
Carter Cooper1d8f5472017-03-15 15:01:09 -06002479 ret = adreno_set_unsecured_mode(adreno_dev, rb);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002480 if (ret)
2481 return ret;
2482
2483 ret = a5xx_gpmu_init(adreno_dev);
2484 if (ret)
2485 return ret;
2486
2487 a5xx_post_start(adreno_dev);
2488
2489 return 0;
2490}
2491
2492static int _load_firmware(struct kgsl_device *device, const char *fwfile,
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002493 struct adreno_firmware *firmware)
Shrenuj Bansala419c792016-10-20 14:05:11 -07002494{
2495 const struct firmware *fw = NULL;
2496 int ret;
2497
2498 ret = request_firmware(&fw, fwfile, device->dev);
2499
2500 if (ret) {
2501 KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
2502 fwfile, ret);
2503 return ret;
2504 }
2505
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002506 ret = kgsl_allocate_global(device, &firmware->memdesc, fw->size - 4,
Shrenuj Bansala419c792016-10-20 14:05:11 -07002507 KGSL_MEMFLAGS_GPUREADONLY, 0, "ucode");
2508
2509 if (ret)
2510 goto done;
2511
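	/*
	 * The leading dword of the image is skipped (presumably a header
	 * word); the first dword of the copied payload is read back below
	 * as the ucode version.
	 */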
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002512 memcpy(firmware->memdesc.hostptr, &fw->data[4], fw->size - 4);
2513 firmware->size = (fw->size - 4) / sizeof(uint32_t);
2514 firmware->version = *(unsigned int *)&fw->data[4];
Shrenuj Bansala419c792016-10-20 14:05:11 -07002515
2516done:
2517 release_firmware(fw);
2518
2519 return ret;
2520}
2521
2522/*
2523 * a5xx_microcode_read() - Read microcode
2524 * @adreno_dev: Pointer to adreno device
2525 */
2526static int a5xx_microcode_read(struct adreno_device *adreno_dev)
2527{
2528 int ret;
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002529 struct adreno_firmware *pm4_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PM4);
2530 struct adreno_firmware *pfp_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PFP);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002531
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002532 if (pm4_fw->memdesc.hostptr == NULL) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07002533 ret = _load_firmware(KGSL_DEVICE(adreno_dev),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002534 adreno_dev->gpucore->pm4fw_name, pm4_fw);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002535 if (ret)
2536 return ret;
2537 }
2538
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002539 if (pfp_fw->memdesc.hostptr == NULL) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07002540 ret = _load_firmware(KGSL_DEVICE(adreno_dev),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002541 adreno_dev->gpucore->pfpfw_name, pfp_fw);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002542 if (ret)
2543 return ret;
2544 }
2545
2546 ret = _load_gpmu_firmware(adreno_dev);
2547 if (ret)
2548 return ret;
2549
2550 _load_regfile(adreno_dev);
2551
2552 return ret;
2553}
2554
2555static struct adreno_perfcount_register a5xx_perfcounters_cp[] = {
2556 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CP_0_LO,
2557 A5XX_RBBM_PERFCTR_CP_0_HI, 0, A5XX_CP_PERFCTR_CP_SEL_0 },
2558 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CP_1_LO,
2559 A5XX_RBBM_PERFCTR_CP_1_HI, 1, A5XX_CP_PERFCTR_CP_SEL_1 },
2560 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CP_2_LO,
2561 A5XX_RBBM_PERFCTR_CP_2_HI, 2, A5XX_CP_PERFCTR_CP_SEL_2 },
2562 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CP_3_LO,
2563 A5XX_RBBM_PERFCTR_CP_3_HI, 3, A5XX_CP_PERFCTR_CP_SEL_3 },
2564 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CP_4_LO,
2565 A5XX_RBBM_PERFCTR_CP_4_HI, 4, A5XX_CP_PERFCTR_CP_SEL_4 },
2566 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CP_5_LO,
2567 A5XX_RBBM_PERFCTR_CP_5_HI, 5, A5XX_CP_PERFCTR_CP_SEL_5 },
2568 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CP_6_LO,
2569 A5XX_RBBM_PERFCTR_CP_6_HI, 6, A5XX_CP_PERFCTR_CP_SEL_6 },
2570 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CP_7_LO,
2571 A5XX_RBBM_PERFCTR_CP_7_HI, 7, A5XX_CP_PERFCTR_CP_SEL_7 },
2572};
2573
2574/*
2575 * Note that PERFCTR_RBBM_0 is missing - it is used to emulate the PWR counters.
2576 * See below.
2577 */
2578static struct adreno_perfcount_register a5xx_perfcounters_rbbm[] = {
2579 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RBBM_1_LO,
2580 A5XX_RBBM_PERFCTR_RBBM_1_HI, 9, A5XX_RBBM_PERFCTR_RBBM_SEL_1 },
2581 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RBBM_2_LO,
2582 A5XX_RBBM_PERFCTR_RBBM_2_HI, 10, A5XX_RBBM_PERFCTR_RBBM_SEL_2 },
2583 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RBBM_3_LO,
2584 A5XX_RBBM_PERFCTR_RBBM_3_HI, 11, A5XX_RBBM_PERFCTR_RBBM_SEL_3 },
2585};
2586
2587static struct adreno_perfcount_register a5xx_perfcounters_pc[] = {
2588 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_PC_0_LO,
2589 A5XX_RBBM_PERFCTR_PC_0_HI, 12, A5XX_PC_PERFCTR_PC_SEL_0 },
2590 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_PC_1_LO,
2591 A5XX_RBBM_PERFCTR_PC_1_HI, 13, A5XX_PC_PERFCTR_PC_SEL_1 },
2592 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_PC_2_LO,
2593 A5XX_RBBM_PERFCTR_PC_2_HI, 14, A5XX_PC_PERFCTR_PC_SEL_2 },
2594 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_PC_3_LO,
2595 A5XX_RBBM_PERFCTR_PC_3_HI, 15, A5XX_PC_PERFCTR_PC_SEL_3 },
2596 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_PC_4_LO,
2597 A5XX_RBBM_PERFCTR_PC_4_HI, 16, A5XX_PC_PERFCTR_PC_SEL_4 },
2598 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_PC_5_LO,
2599 A5XX_RBBM_PERFCTR_PC_5_HI, 17, A5XX_PC_PERFCTR_PC_SEL_5 },
2600 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_PC_6_LO,
2601 A5XX_RBBM_PERFCTR_PC_6_HI, 18, A5XX_PC_PERFCTR_PC_SEL_6 },
2602 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_PC_7_LO,
2603 A5XX_RBBM_PERFCTR_PC_7_HI, 19, A5XX_PC_PERFCTR_PC_SEL_7 },
2604};
2605
2606static struct adreno_perfcount_register a5xx_perfcounters_vfd[] = {
2607 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VFD_0_LO,
2608 A5XX_RBBM_PERFCTR_VFD_0_HI, 20, A5XX_VFD_PERFCTR_VFD_SEL_0 },
2609 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VFD_1_LO,
2610 A5XX_RBBM_PERFCTR_VFD_1_HI, 21, A5XX_VFD_PERFCTR_VFD_SEL_1 },
2611 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VFD_2_LO,
2612 A5XX_RBBM_PERFCTR_VFD_2_HI, 22, A5XX_VFD_PERFCTR_VFD_SEL_2 },
2613 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VFD_3_LO,
2614 A5XX_RBBM_PERFCTR_VFD_3_HI, 23, A5XX_VFD_PERFCTR_VFD_SEL_3 },
2615 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VFD_4_LO,
2616 A5XX_RBBM_PERFCTR_VFD_4_HI, 24, A5XX_VFD_PERFCTR_VFD_SEL_4 },
2617 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VFD_5_LO,
2618 A5XX_RBBM_PERFCTR_VFD_5_HI, 25, A5XX_VFD_PERFCTR_VFD_SEL_5 },
2619 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VFD_6_LO,
2620 A5XX_RBBM_PERFCTR_VFD_6_HI, 26, A5XX_VFD_PERFCTR_VFD_SEL_6 },
2621 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VFD_7_LO,
2622 A5XX_RBBM_PERFCTR_VFD_7_HI, 27, A5XX_VFD_PERFCTR_VFD_SEL_7 },
2623};
2624
2625static struct adreno_perfcount_register a5xx_perfcounters_hlsq[] = {
2626 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_HLSQ_0_LO,
2627 A5XX_RBBM_PERFCTR_HLSQ_0_HI, 28, A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
2628 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_HLSQ_1_LO,
2629 A5XX_RBBM_PERFCTR_HLSQ_1_HI, 29, A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
2630 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_HLSQ_2_LO,
2631 A5XX_RBBM_PERFCTR_HLSQ_2_HI, 30, A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
2632 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_HLSQ_3_LO,
2633 A5XX_RBBM_PERFCTR_HLSQ_3_HI, 31, A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
2634 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_HLSQ_4_LO,
2635 A5XX_RBBM_PERFCTR_HLSQ_4_HI, 32, A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
2636 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_HLSQ_5_LO,
2637 A5XX_RBBM_PERFCTR_HLSQ_5_HI, 33, A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
2638 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_HLSQ_6_LO,
2639 A5XX_RBBM_PERFCTR_HLSQ_6_HI, 34, A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 },
2640 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_HLSQ_7_LO,
2641 A5XX_RBBM_PERFCTR_HLSQ_7_HI, 35, A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 },
2642};
2643
2644static struct adreno_perfcount_register a5xx_perfcounters_vpc[] = {
2645 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VPC_0_LO,
2646 A5XX_RBBM_PERFCTR_VPC_0_HI, 36, A5XX_VPC_PERFCTR_VPC_SEL_0 },
2647 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VPC_1_LO,
2648 A5XX_RBBM_PERFCTR_VPC_1_HI, 37, A5XX_VPC_PERFCTR_VPC_SEL_1 },
2649 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VPC_2_LO,
2650 A5XX_RBBM_PERFCTR_VPC_2_HI, 38, A5XX_VPC_PERFCTR_VPC_SEL_2 },
2651 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VPC_3_LO,
2652 A5XX_RBBM_PERFCTR_VPC_3_HI, 39, A5XX_VPC_PERFCTR_VPC_SEL_3 },
2653};
2654
2655static struct adreno_perfcount_register a5xx_perfcounters_ccu[] = {
2656 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CCU_0_LO,
2657 A5XX_RBBM_PERFCTR_CCU_0_HI, 40, A5XX_RB_PERFCTR_CCU_SEL_0 },
2658 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CCU_1_LO,
2659 A5XX_RBBM_PERFCTR_CCU_1_HI, 41, A5XX_RB_PERFCTR_CCU_SEL_1 },
2660 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CCU_2_LO,
2661 A5XX_RBBM_PERFCTR_CCU_2_HI, 42, A5XX_RB_PERFCTR_CCU_SEL_2 },
2662 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CCU_3_LO,
2663 A5XX_RBBM_PERFCTR_CCU_3_HI, 43, A5XX_RB_PERFCTR_CCU_SEL_3 },
2664};
2665
2666static struct adreno_perfcount_register a5xx_perfcounters_tse[] = {
2667 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_TSE_0_LO,
2668 A5XX_RBBM_PERFCTR_TSE_0_HI, 44, A5XX_GRAS_PERFCTR_TSE_SEL_0 },
2669 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_TSE_1_LO,
2670 A5XX_RBBM_PERFCTR_TSE_1_HI, 45, A5XX_GRAS_PERFCTR_TSE_SEL_1 },
2671 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_TSE_2_LO,
2672 A5XX_RBBM_PERFCTR_TSE_2_HI, 46, A5XX_GRAS_PERFCTR_TSE_SEL_2 },
2673 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_TSE_3_LO,
2674 A5XX_RBBM_PERFCTR_TSE_3_HI, 47, A5XX_GRAS_PERFCTR_TSE_SEL_3 },
2675};
2676
2677
2678static struct adreno_perfcount_register a5xx_perfcounters_ras[] = {
2679 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RAS_0_LO,
2680 A5XX_RBBM_PERFCTR_RAS_0_HI, 48, A5XX_GRAS_PERFCTR_RAS_SEL_0 },
2681 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RAS_1_LO,
2682 A5XX_RBBM_PERFCTR_RAS_1_HI, 49, A5XX_GRAS_PERFCTR_RAS_SEL_1 },
2683 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RAS_2_LO,
2684 A5XX_RBBM_PERFCTR_RAS_2_HI, 50, A5XX_GRAS_PERFCTR_RAS_SEL_2 },
2685 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RAS_3_LO,
2686 A5XX_RBBM_PERFCTR_RAS_3_HI, 51, A5XX_GRAS_PERFCTR_RAS_SEL_3 },
2687};
2688
2689static struct adreno_perfcount_register a5xx_perfcounters_uche[] = {
2690 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_UCHE_0_LO,
2691 A5XX_RBBM_PERFCTR_UCHE_0_HI, 52, A5XX_UCHE_PERFCTR_UCHE_SEL_0 },
2692 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_UCHE_1_LO,
2693 A5XX_RBBM_PERFCTR_UCHE_1_HI, 53, A5XX_UCHE_PERFCTR_UCHE_SEL_1 },
2694 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_UCHE_2_LO,
2695 A5XX_RBBM_PERFCTR_UCHE_2_HI, 54, A5XX_UCHE_PERFCTR_UCHE_SEL_2 },
2696 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_UCHE_3_LO,
2697 A5XX_RBBM_PERFCTR_UCHE_3_HI, 55, A5XX_UCHE_PERFCTR_UCHE_SEL_3 },
2698 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_UCHE_4_LO,
2699 A5XX_RBBM_PERFCTR_UCHE_4_HI, 56, A5XX_UCHE_PERFCTR_UCHE_SEL_4 },
2700 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_UCHE_5_LO,
2701 A5XX_RBBM_PERFCTR_UCHE_5_HI, 57, A5XX_UCHE_PERFCTR_UCHE_SEL_5 },
2702 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_UCHE_6_LO,
2703 A5XX_RBBM_PERFCTR_UCHE_6_HI, 58, A5XX_UCHE_PERFCTR_UCHE_SEL_6 },
2704 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_UCHE_7_LO,
2705 A5XX_RBBM_PERFCTR_UCHE_7_HI, 59, A5XX_UCHE_PERFCTR_UCHE_SEL_7 },
2706};
2707
2708static struct adreno_perfcount_register a5xx_perfcounters_tp[] = {
2709 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_TP_0_LO,
2710 A5XX_RBBM_PERFCTR_TP_0_HI, 60, A5XX_TPL1_PERFCTR_TP_SEL_0 },
2711 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_TP_1_LO,
2712 A5XX_RBBM_PERFCTR_TP_1_HI, 61, A5XX_TPL1_PERFCTR_TP_SEL_1 },
2713 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_TP_2_LO,
2714 A5XX_RBBM_PERFCTR_TP_2_HI, 62, A5XX_TPL1_PERFCTR_TP_SEL_2 },
2715 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_TP_3_LO,
2716 A5XX_RBBM_PERFCTR_TP_3_HI, 63, A5XX_TPL1_PERFCTR_TP_SEL_3 },
2717 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_TP_4_LO,
2718 A5XX_RBBM_PERFCTR_TP_4_HI, 64, A5XX_TPL1_PERFCTR_TP_SEL_4 },
2719 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_TP_5_LO,
2720 A5XX_RBBM_PERFCTR_TP_5_HI, 65, A5XX_TPL1_PERFCTR_TP_SEL_5 },
2721 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_TP_6_LO,
2722 A5XX_RBBM_PERFCTR_TP_6_HI, 66, A5XX_TPL1_PERFCTR_TP_SEL_6 },
2723 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_TP_7_LO,
2724 A5XX_RBBM_PERFCTR_TP_7_HI, 67, A5XX_TPL1_PERFCTR_TP_SEL_7 },
2725};
2726
2727static struct adreno_perfcount_register a5xx_perfcounters_sp[] = {
2728 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_SP_0_LO,
2729 A5XX_RBBM_PERFCTR_SP_0_HI, 68, A5XX_SP_PERFCTR_SP_SEL_0 },
2730 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_SP_1_LO,
2731 A5XX_RBBM_PERFCTR_SP_1_HI, 69, A5XX_SP_PERFCTR_SP_SEL_1 },
2732 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_SP_2_LO,
2733 A5XX_RBBM_PERFCTR_SP_2_HI, 70, A5XX_SP_PERFCTR_SP_SEL_2 },
2734 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_SP_3_LO,
2735 A5XX_RBBM_PERFCTR_SP_3_HI, 71, A5XX_SP_PERFCTR_SP_SEL_3 },
2736 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_SP_4_LO,
2737 A5XX_RBBM_PERFCTR_SP_4_HI, 72, A5XX_SP_PERFCTR_SP_SEL_4 },
2738 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_SP_5_LO,
2739 A5XX_RBBM_PERFCTR_SP_5_HI, 73, A5XX_SP_PERFCTR_SP_SEL_5 },
2740 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_SP_6_LO,
2741 A5XX_RBBM_PERFCTR_SP_6_HI, 74, A5XX_SP_PERFCTR_SP_SEL_6 },
2742 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_SP_7_LO,
2743 A5XX_RBBM_PERFCTR_SP_7_HI, 75, A5XX_SP_PERFCTR_SP_SEL_7 },
2744 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_SP_8_LO,
2745 A5XX_RBBM_PERFCTR_SP_8_HI, 76, A5XX_SP_PERFCTR_SP_SEL_8 },
2746 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_SP_9_LO,
2747 A5XX_RBBM_PERFCTR_SP_9_HI, 77, A5XX_SP_PERFCTR_SP_SEL_9 },
2748 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_SP_10_LO,
2749 A5XX_RBBM_PERFCTR_SP_10_HI, 78, A5XX_SP_PERFCTR_SP_SEL_10 },
2750 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_SP_11_LO,
2751 A5XX_RBBM_PERFCTR_SP_11_HI, 79, A5XX_SP_PERFCTR_SP_SEL_11 },
2752};
2753
2754static struct adreno_perfcount_register a5xx_perfcounters_rb[] = {
2755 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RB_0_LO,
2756 A5XX_RBBM_PERFCTR_RB_0_HI, 80, A5XX_RB_PERFCTR_RB_SEL_0 },
2757 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RB_1_LO,
2758 A5XX_RBBM_PERFCTR_RB_1_HI, 81, A5XX_RB_PERFCTR_RB_SEL_1 },
2759 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RB_2_LO,
2760 A5XX_RBBM_PERFCTR_RB_2_HI, 82, A5XX_RB_PERFCTR_RB_SEL_2 },
2761 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RB_3_LO,
2762 A5XX_RBBM_PERFCTR_RB_3_HI, 83, A5XX_RB_PERFCTR_RB_SEL_3 },
2763 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RB_4_LO,
2764 A5XX_RBBM_PERFCTR_RB_4_HI, 84, A5XX_RB_PERFCTR_RB_SEL_4 },
2765 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RB_5_LO,
2766 A5XX_RBBM_PERFCTR_RB_5_HI, 85, A5XX_RB_PERFCTR_RB_SEL_5 },
2767 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RB_6_LO,
2768 A5XX_RBBM_PERFCTR_RB_6_HI, 86, A5XX_RB_PERFCTR_RB_SEL_6 },
2769 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RB_7_LO,
2770 A5XX_RBBM_PERFCTR_RB_7_HI, 87, A5XX_RB_PERFCTR_RB_SEL_7 },
2771};
2772
2773static struct adreno_perfcount_register a5xx_perfcounters_vsc[] = {
2774 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VSC_0_LO,
2775 A5XX_RBBM_PERFCTR_VSC_0_HI, 88, A5XX_VSC_PERFCTR_VSC_SEL_0 },
2776 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_VSC_1_LO,
2777 A5XX_RBBM_PERFCTR_VSC_1_HI, 89, A5XX_VSC_PERFCTR_VSC_SEL_1 },
2778};
2779
2780static struct adreno_perfcount_register a5xx_perfcounters_lrz[] = {
2781 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_LRZ_0_LO,
2782 A5XX_RBBM_PERFCTR_LRZ_0_HI, 90, A5XX_GRAS_PERFCTR_LRZ_SEL_0 },
2783 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_LRZ_1_LO,
2784 A5XX_RBBM_PERFCTR_LRZ_1_HI, 91, A5XX_GRAS_PERFCTR_LRZ_SEL_1 },
2785 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_LRZ_2_LO,
2786 A5XX_RBBM_PERFCTR_LRZ_2_HI, 92, A5XX_GRAS_PERFCTR_LRZ_SEL_2 },
2787 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_LRZ_3_LO,
2788 A5XX_RBBM_PERFCTR_LRZ_3_HI, 93, A5XX_GRAS_PERFCTR_LRZ_SEL_3 },
2789};
2790
2791static struct adreno_perfcount_register a5xx_perfcounters_cmp[] = {
2792 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CMP_0_LO,
2793 A5XX_RBBM_PERFCTR_CMP_0_HI, 94, A5XX_RB_PERFCTR_CMP_SEL_0 },
2794 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CMP_1_LO,
2795 A5XX_RBBM_PERFCTR_CMP_1_HI, 95, A5XX_RB_PERFCTR_CMP_SEL_1 },
2796 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CMP_2_LO,
2797 A5XX_RBBM_PERFCTR_CMP_2_HI, 96, A5XX_RB_PERFCTR_CMP_SEL_2 },
2798 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_CMP_3_LO,
2799 A5XX_RBBM_PERFCTR_CMP_3_HI, 97, A5XX_RB_PERFCTR_CMP_SEL_3 },
2800};
2801
2802static struct adreno_perfcount_register a5xx_perfcounters_vbif[] = {
2803 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_VBIF_PERF_CNT_LOW0,
2804 A5XX_VBIF_PERF_CNT_HIGH0, -1, A5XX_VBIF_PERF_CNT_SEL0 },
2805 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_VBIF_PERF_CNT_LOW1,
2806 A5XX_VBIF_PERF_CNT_HIGH1, -1, A5XX_VBIF_PERF_CNT_SEL1 },
2807 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_VBIF_PERF_CNT_LOW2,
2808 A5XX_VBIF_PERF_CNT_HIGH2, -1, A5XX_VBIF_PERF_CNT_SEL2 },
2809 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_VBIF_PERF_CNT_LOW3,
2810 A5XX_VBIF_PERF_CNT_HIGH3, -1, A5XX_VBIF_PERF_CNT_SEL3 },
2811};
2812
2813static struct adreno_perfcount_register a5xx_perfcounters_vbif_pwr[] = {
2814 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_VBIF_PERF_PWR_CNT_LOW0,
2815 A5XX_VBIF_PERF_PWR_CNT_HIGH0, -1, A5XX_VBIF_PERF_PWR_CNT_EN0 },
2816 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_VBIF_PERF_PWR_CNT_LOW1,
2817 A5XX_VBIF_PERF_PWR_CNT_HIGH1, -1, A5XX_VBIF_PERF_PWR_CNT_EN1 },
2818 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_VBIF_PERF_PWR_CNT_LOW2,
2819 A5XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A5XX_VBIF_PERF_PWR_CNT_EN2 },
2820};
2821
2822static struct adreno_perfcount_register a5xx_perfcounters_alwayson[] = {
2823 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_ALWAYSON_COUNTER_LO,
2824 A5XX_RBBM_ALWAYSON_COUNTER_HI, -1 },
2825};
2826
2827/*
2828 * 5XX targets don't really have physical PERFCTR_PWR registers - we emulate
2829 * them using similar performance counters from the RBBM block. The difference
2830 * between using this group and the RBBM group is that the RBBM counters are
2831 * reloaded after a power collapse which is not how the PWR counters behaved on
2832 * legacy hardware. In order to limit the disruption on the rest of the system
2833 * we go out of our way to ensure backwards compatibility. Since RBBM counters
2834 * are in short supply, we don't emulate PWR:0 which nobody uses - mark it as
2835 * broken.
2836 */
2837static struct adreno_perfcount_register a5xx_perfcounters_pwr[] = {
2838 { KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
2839 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RBBM_0_LO,
2840 A5XX_RBBM_PERFCTR_RBBM_0_HI, -1, 0},
2841};
2842
2843static struct adreno_perfcount_register a5xx_pwrcounters_sp[] = {
2844 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_SP_POWER_COUNTER_0_LO,
2845 A5XX_SP_POWER_COUNTER_0_HI, -1, A5XX_SP_POWERCTR_SP_SEL_0 },
2846 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_SP_POWER_COUNTER_1_LO,
2847 A5XX_SP_POWER_COUNTER_1_HI, -1, A5XX_SP_POWERCTR_SP_SEL_1 },
2848 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_SP_POWER_COUNTER_2_LO,
2849 A5XX_SP_POWER_COUNTER_2_HI, -1, A5XX_SP_POWERCTR_SP_SEL_2 },
2850 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_SP_POWER_COUNTER_3_LO,
2851 A5XX_SP_POWER_COUNTER_3_HI, -1, A5XX_SP_POWERCTR_SP_SEL_3 },
2852};
2853
2854static struct adreno_perfcount_register a5xx_pwrcounters_tp[] = {
2855 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_TP_POWER_COUNTER_0_LO,
2856 A5XX_TP_POWER_COUNTER_0_HI, -1, A5XX_TPL1_POWERCTR_TP_SEL_0 },
2857 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_TP_POWER_COUNTER_1_LO,
2858 A5XX_TP_POWER_COUNTER_1_HI, -1, A5XX_TPL1_POWERCTR_TP_SEL_1 },
2859 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_TP_POWER_COUNTER_2_LO,
2860 A5XX_TP_POWER_COUNTER_2_HI, -1, A5XX_TPL1_POWERCTR_TP_SEL_2 },
2861 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_TP_POWER_COUNTER_3_LO,
2862 A5XX_TP_POWER_COUNTER_3_HI, -1, A5XX_TPL1_POWERCTR_TP_SEL_3 },
2863};
2864
static struct adreno_perfcount_register a5xx_pwrcounters_rb[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RB_POWER_COUNTER_0_LO,
		A5XX_RB_POWER_COUNTER_0_HI, -1, A5XX_RB_POWERCTR_RB_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RB_POWER_COUNTER_1_LO,
		A5XX_RB_POWER_COUNTER_1_HI, -1, A5XX_RB_POWERCTR_RB_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RB_POWER_COUNTER_2_LO,
		A5XX_RB_POWER_COUNTER_2_HI, -1, A5XX_RB_POWERCTR_RB_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RB_POWER_COUNTER_3_LO,
		A5XX_RB_POWER_COUNTER_3_HI, -1, A5XX_RB_POWERCTR_RB_SEL_3 },
};

static struct adreno_perfcount_register a5xx_pwrcounters_ccu[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_CCU_POWER_COUNTER_0_LO,
		A5XX_CCU_POWER_COUNTER_0_HI, -1, A5XX_RB_POWERCTR_CCU_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_CCU_POWER_COUNTER_1_LO,
		A5XX_CCU_POWER_COUNTER_1_HI, -1, A5XX_RB_POWERCTR_CCU_SEL_1 },
};

static struct adreno_perfcount_register a5xx_pwrcounters_uche[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_UCHE_POWER_COUNTER_0_LO,
		A5XX_UCHE_POWER_COUNTER_0_HI, -1,
		A5XX_UCHE_POWERCTR_UCHE_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_UCHE_POWER_COUNTER_1_LO,
		A5XX_UCHE_POWER_COUNTER_1_HI, -1,
		A5XX_UCHE_POWERCTR_UCHE_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_UCHE_POWER_COUNTER_2_LO,
		A5XX_UCHE_POWER_COUNTER_2_HI, -1,
		A5XX_UCHE_POWERCTR_UCHE_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_UCHE_POWER_COUNTER_3_LO,
		A5XX_UCHE_POWER_COUNTER_3_HI, -1,
		A5XX_UCHE_POWERCTR_UCHE_SEL_3 },
};

static struct adreno_perfcount_register a5xx_pwrcounters_cp[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_CP_POWER_COUNTER_0_LO,
		A5XX_CP_POWER_COUNTER_0_HI, -1, A5XX_CP_POWERCTR_CP_SEL_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_CP_POWER_COUNTER_1_LO,
		A5XX_CP_POWER_COUNTER_1_HI, -1, A5XX_CP_POWERCTR_CP_SEL_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_CP_POWER_COUNTER_2_LO,
		A5XX_CP_POWER_COUNTER_2_HI, -1, A5XX_CP_POWERCTR_CP_SEL_2 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_CP_POWER_COUNTER_3_LO,
		A5XX_CP_POWER_COUNTER_3_HI, -1, A5XX_CP_POWERCTR_CP_SEL_3 },
};

static struct adreno_perfcount_register a5xx_pwrcounters_gpmu[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_GPMU_POWER_COUNTER_0_LO,
		A5XX_GPMU_POWER_COUNTER_0_HI, -1,
		A5XX_GPMU_POWER_COUNTER_SELECT_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_GPMU_POWER_COUNTER_1_LO,
		A5XX_GPMU_POWER_COUNTER_1_HI, -1,
		A5XX_GPMU_POWER_COUNTER_SELECT_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_GPMU_POWER_COUNTER_2_LO,
		A5XX_GPMU_POWER_COUNTER_2_HI, -1,
		A5XX_GPMU_POWER_COUNTER_SELECT_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_GPMU_POWER_COUNTER_3_LO,
		A5XX_GPMU_POWER_COUNTER_3_HI, -1,
		A5XX_GPMU_POWER_COUNTER_SELECT_0 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_GPMU_POWER_COUNTER_4_LO,
		A5XX_GPMU_POWER_COUNTER_4_HI, -1,
		A5XX_GPMU_POWER_COUNTER_SELECT_1 },
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_GPMU_POWER_COUNTER_5_LO,
		A5XX_GPMU_POWER_COUNTER_5_HI, -1,
		A5XX_GPMU_POWER_COUNTER_SELECT_1 },
};
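/*
 * The six GPMU counters share two select registers: SELECT_0 is listed
 * for counters 0-3 and SELECT_1 for counters 4-5, so each select
 * register presumably packs multiple per-counter select fields rather
 * than serving a single counter.
 */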

static struct adreno_perfcount_register a5xx_pwrcounters_alwayson[] = {
	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_GPMU_ALWAYS_ON_COUNTER_LO,
		A5XX_GPMU_ALWAYS_ON_COUNTER_HI, -1 },
};

#define A5XX_PERFCOUNTER_GROUP(offset, name) \
	ADRENO_PERFCOUNTER_GROUP(a5xx, offset, name)

#define A5XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
	ADRENO_PERFCOUNTER_GROUP_FLAGS(a5xx, offset, name, flags)

#define A5XX_POWER_COUNTER_GROUP(offset, name) \
	ADRENO_POWER_COUNTER_GROUP(a5xx, offset, name)

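/*
 * These wrappers pin the "a5xx" prefix onto the generic adreno group
 * macros; A5XX_PERFCOUNTER_GROUP(CP, cp), for example, produces the
 * entry that ties KGSL_PERFCOUNTER_GROUP_CP to the a5xx_perfcounters_cp
 * table above, and the power-counter variant does the same for the
 * a5xx_pwrcounters_* tables.
 */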
static struct adreno_perfcount_group a5xx_perfcounter_groups
	[KGSL_PERFCOUNTER_GROUP_MAX] = {
	A5XX_PERFCOUNTER_GROUP(CP, cp),
	A5XX_PERFCOUNTER_GROUP(RBBM, rbbm),
	A5XX_PERFCOUNTER_GROUP(PC, pc),
	A5XX_PERFCOUNTER_GROUP(VFD, vfd),
	A5XX_PERFCOUNTER_GROUP(HLSQ, hlsq),
	A5XX_PERFCOUNTER_GROUP(VPC, vpc),
	A5XX_PERFCOUNTER_GROUP(CCU, ccu),
	A5XX_PERFCOUNTER_GROUP(CMP, cmp),
	A5XX_PERFCOUNTER_GROUP(TSE, tse),
	A5XX_PERFCOUNTER_GROUP(RAS, ras),
	A5XX_PERFCOUNTER_GROUP(LRZ, lrz),
	A5XX_PERFCOUNTER_GROUP(UCHE, uche),
	A5XX_PERFCOUNTER_GROUP(TP, tp),
	A5XX_PERFCOUNTER_GROUP(SP, sp),
	A5XX_PERFCOUNTER_GROUP(RB, rb),
	A5XX_PERFCOUNTER_GROUP(VSC, vsc),
	A5XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
		ADRENO_PERFCOUNTER_GROUP_FIXED),
	A5XX_PERFCOUNTER_GROUP(VBIF, vbif),
	A5XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
		ADRENO_PERFCOUNTER_GROUP_FIXED),
	A5XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
		ADRENO_PERFCOUNTER_GROUP_FIXED),
	A5XX_POWER_COUNTER_GROUP(SP, sp),
	A5XX_POWER_COUNTER_GROUP(TP, tp),
	A5XX_POWER_COUNTER_GROUP(RB, rb),
	A5XX_POWER_COUNTER_GROUP(CCU, ccu),
	A5XX_POWER_COUNTER_GROUP(UCHE, uche),
	A5XX_POWER_COUNTER_GROUP(CP, cp),
	A5XX_POWER_COUNTER_GROUP(GPMU, gpmu),
	A5XX_POWER_COUNTER_GROUP(ALWAYSON, alwayson),
};

static struct adreno_perfcounters a5xx_perfcounters = {
	a5xx_perfcounter_groups,
	ARRAY_SIZE(a5xx_perfcounter_groups),
};
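/*
 * Minimal usage sketch (illustrative, not called from this file):
 * a client reserves a counter from one of the groups above with
 *
 *	unsigned int lo, hi;
 *
 *	adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
 *		A5XX_SP_ALU_ACTIVE_CYCLES, &lo, &hi, PERFCOUNTER_FLAG_KERNEL);
 *
 * which searches a5xx_perfcounter_groups[] for a free register in the
 * requested group and returns its LO/HI offsets for reading.
 */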

static struct adreno_ft_perf_counters a5xx_ft_perf_counters[] = {
	{KGSL_PERFCOUNTER_GROUP_SP, A5XX_SP_ALU_ACTIVE_CYCLES},
	{KGSL_PERFCOUNTER_GROUP_SP, A5XX_SP0_ICL1_MISSES},
	{KGSL_PERFCOUNTER_GROUP_SP, A5XX_SP_FS_CFLOW_INSTRUCTIONS},
	{KGSL_PERFCOUNTER_GROUP_TSE, A5XX_TSE_INPUT_PRIM_NUM},
};

static unsigned int a5xx_int_bits[ADRENO_INT_BITS_MAX] = {
	ADRENO_INT_DEFINE(ADRENO_INT_RBBM_AHB_ERROR, A5XX_INT_RBBM_AHB_ERROR),
};

/* Register offset defines for A5XX, in order of enum adreno_regs */
static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
	ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A5XX_CP_WFI_PEND_CTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A5XX_CP_RB_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A5XX_CP_RB_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
			A5XX_CP_RB_RPTR_ADDR_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
			A5XX_CP_RB_RPTR_ADDR_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A5XX_CP_RB_RPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A5XX_CP_RB_WPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A5XX_CP_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A5XX_CP_ME_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A5XX_CP_RB_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A5XX_CP_IB1_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE_HI, A5XX_CP_IB1_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A5XX_CP_IB1_BUFSZ),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A5XX_CP_IB2_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE_HI, A5XX_CP_IB2_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A5XX_CP_IB2_BUFSZ),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_ADDR, A5XX_CP_ROQ_DBG_ADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_DATA, A5XX_CP_ROQ_DBG_DATA),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_MERCIU_ADDR, A5XX_CP_MERCIU_DBG_ADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_MERCIU_DATA, A5XX_CP_MERCIU_DBG_DATA_1),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_MERCIU_DATA2,
			A5XX_CP_MERCIU_DBG_DATA_2),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_MEQ_ADDR, A5XX_CP_MEQ_DBG_ADDR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_MEQ_DATA, A5XX_CP_MEQ_DBG_DATA),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PROTECT_REG_0, A5XX_CP_PROTECT_REG_0),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_HW_FAULT, A5XX_CP_HW_FAULT),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT, A5XX_CP_CONTEXT_SWITCH_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT_DEBUG, ADRENO_REG_SKIP),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT_DISABLE, ADRENO_REG_SKIP),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
			A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
			A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A5XX_RBBM_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A5XX_RBBM_STATUS3),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A5XX_RBBM_PERFCTR_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
			A5XX_RBBM_PERFCTR_LOAD_CMD0),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
			A5XX_RBBM_PERFCTR_LOAD_CMD1),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
			A5XX_RBBM_PERFCTR_LOAD_CMD2),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD3,
			A5XX_RBBM_PERFCTR_LOAD_CMD3),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A5XX_RBBM_INT_0_MASK),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A5XX_RBBM_INT_0_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_CLOCK_CTL, A5XX_RBBM_CLOCK_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_CLEAR_CMD,
			A5XX_RBBM_INT_CLEAR_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A5XX_RBBM_SW_RESET_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD,
			A5XX_RBBM_BLOCK_SW_RESET_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
			A5XX_RBBM_BLOCK_SW_RESET_CMD2),
	ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0, A5XX_UCHE_INVALIDATE0),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
			A5XX_RBBM_PERFCTR_RBBM_0_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
			A5XX_RBBM_PERFCTR_RBBM_0_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
			A5XX_RBBM_PERFCTR_LOAD_VALUE_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
			A5XX_RBBM_PERFCTR_LOAD_VALUE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
			A5XX_RBBM_SECVID_TRUST_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONFIG,
			A5XX_RBBM_SECVID_TRUST_CONFIG),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
			A5XX_RBBM_SECVID_TSB_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
			A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
			A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
			A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
			A5XX_RBBM_ALWAYSON_COUNTER_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
			A5XX_RBBM_ALWAYSON_COUNTER_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
			A5XX_VBIF_XIN_HALT_CTRL0),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
			A5XX_VBIF_XIN_HALT_CTRL1),
	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION,
			A5XX_VBIF_VERSION),
	ADRENO_REG_DEFINE(ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
			A5XX_GPMU_POWER_COUNTER_ENABLE),
};

static const struct adreno_reg_offsets a5xx_reg_offsets = {
	.offsets = a5xx_register_offsets,
	.offset_0 = ADRENO_REG_REGISTER_MAX,
};
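/*
 * The table above is what lets generic adreno code name A5XX registers
 * by enum, e.g. (sketch):
 *
 *	unsigned int rptr;
 *
 *	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);
 *
 * reads A5XX_CP_RB_RPTR, while entries mapped to ADRENO_REG_SKIP turn
 * the accessor into a no-op for registers this core does not have.
 */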

static void a5xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status1, status2;

	kgsl_regread(device, A5XX_CP_INTERRUPT_STATUS, &status1);

	if (status1 & BIT(A5XX_CP_OPCODE_ERROR)) {
		unsigned int val;

		kgsl_regwrite(device, A5XX_CP_PFP_STAT_ADDR, 0);

		/*
		 * A5XX_CP_PFP_STAT_DATA is indexed, so read it twice to get
		 * the value we want
		 */
		kgsl_regread(device, A5XX_CP_PFP_STAT_DATA, &val);
		kgsl_regread(device, A5XX_CP_PFP_STAT_DATA, &val);

		KGSL_DRV_CRIT_RATELIMIT(device,
			"ringbuffer opcode error | possible opcode=0x%8.8X\n",
			val);
	}
	if (status1 & BIT(A5XX_CP_RESERVED_BIT_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device,
			"ringbuffer reserved bit error interrupt\n");
	if (status1 & BIT(A5XX_CP_HW_FAULT_ERROR)) {
		kgsl_regread(device, A5XX_CP_HW_FAULT, &status2);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP | Ringbuffer HW fault | status=%x\n",
			status2);
	}
	if (status1 & BIT(A5XX_CP_DMA_ERROR))
		KGSL_DRV_CRIT_RATELIMIT(device, "CP | DMA error\n");
	if (status1 & BIT(A5XX_CP_REGISTER_PROTECTION_ERROR)) {
		kgsl_regread(device, A5XX_CP_PROTECT_STATUS, &status2);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"CP | Protected mode error | %s | addr=%x | status=%x\n",
			status2 & (1 << 24) ? "WRITE" : "READ",
			(status2 & 0xFFFFF) >> 2, status2);
	}
	if (status1 & BIT(A5XX_CP_AHB_ERROR)) {
		kgsl_regread(device, A5XX_CP_AHB_FAULT, &status2);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"ringbuffer AHB error interrupt | status=%x\n",
			status2);
	}
}

static void a5xx_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int reg;

	switch (bit) {
	case A5XX_INT_RBBM_AHB_ERROR: {
		kgsl_regread(device, A5XX_RBBM_AHB_ERROR_STATUS, &reg);

		/*
		 * Return the word address of the erroring register so that it
		 * matches the register specification
		 */
		KGSL_DRV_CRIT_RATELIMIT(device,
			"RBBM | AHB bus error | %s | addr=%x | ports=%x:%x\n",
			reg & (1 << 28) ? "WRITE" : "READ",
			(reg & 0xFFFFF) >> 2, (reg >> 20) & 0x3,
			(reg >> 24) & 0xF);

		/* Clear the error */
		kgsl_regwrite(device, A5XX_RBBM_AHB_CMD, (1 << 4));
		break;
	}
	case A5XX_INT_RBBM_TRANSFER_TIMEOUT:
		KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: AHB transfer timeout\n");
		break;
	case A5XX_INT_RBBM_ME_MS_TIMEOUT:
		kgsl_regread(device, A5XX_RBBM_AHB_ME_SPLIT_STATUS, &reg);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"RBBM | ME master split timeout | status=%x\n", reg);
		break;
	case A5XX_INT_RBBM_PFP_MS_TIMEOUT:
		kgsl_regread(device, A5XX_RBBM_AHB_PFP_SPLIT_STATUS, &reg);
		KGSL_DRV_CRIT_RATELIMIT(device,
			"RBBM | PFP master split timeout | status=%x\n", reg);
		break;
	case A5XX_INT_RBBM_ETS_MS_TIMEOUT:
		KGSL_DRV_CRIT_RATELIMIT(device,
			"RBBM: ETS master split timeout\n");
		break;
	case A5XX_INT_RBBM_ATB_ASYNC_OVERFLOW:
		KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB ASYNC overflow\n");
		break;
	case A5XX_INT_RBBM_ATB_BUS_OVERFLOW:
		KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB bus overflow\n");
		break;
	case A5XX_INT_UCHE_OOB_ACCESS:
		KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Out of bounds access\n");
		break;
	case A5XX_INT_UCHE_TRAP_INTR:
		KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Trap interrupt\n");
		break;
	case A5XX_INT_GPMU_VOLTAGE_DROOP:
		KGSL_DRV_CRIT_RATELIMIT(device, "GPMU: Voltage droop\n");
		break;
	default:
		KGSL_DRV_CRIT_RATELIMIT(device, "Unknown interrupt %d\n", bit);
	}
}

static void a5xx_irq_storm_worker(struct work_struct *work)
{
	struct adreno_device *adreno_dev = container_of(work,
			struct adreno_device, irq_storm_work);
	struct kgsl_device *device = &adreno_dev->dev;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int status;

	mutex_lock(&device->mutex);

	/* Wait for the storm to clear up */
	do {
		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
				BIT(A5XX_INT_CP_CACHE_FLUSH_TS));
		adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS,
				&status);
	} while (status & BIT(A5XX_INT_CP_CACHE_FLUSH_TS));

	/* Re-enable the interrupt bit in the mask */
	gpudev->irq->mask |= BIT(A5XX_INT_CP_CACHE_FLUSH_TS);
	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_0_MASK,
			gpudev->irq->mask);
	clear_bit(ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED, &adreno_dev->priv);

	KGSL_DRV_WARN(device, "Re-enabled A5XX_INT_CP_CACHE_FLUSH_TS");
	mutex_unlock(&device->mutex);

	/* Reschedule just to make sure everything retires */
	adreno_dispatcher_schedule(device);
}

static void a5xx_cp_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cur;
	static unsigned int count;
	static unsigned int prev;

	if (test_bit(ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED, &adreno_dev->priv))
		return;

	kgsl_sharedmem_readl(&device->memstore, &cur,
			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
				ref_wait_ts));

	/*
	 * prev holds the previously read value from memory, which the GPU
	 * should change with every interrupt. If the value we know about
	 * and the value we just read are the same, then we are likely in a
	 * storm. If this happens twice, disable the interrupt in the mask
	 * so the dispatcher can take care of the issue. It is then up to
	 * the dispatcher to re-enable the mask once all work is done and
	 * the storm has ended.
	 */
	if (prev == cur) {
		count++;
		if (count == 2) {
			struct adreno_gpudev *gpudev =
				ADRENO_GPU_DEVICE(adreno_dev);

			/* disable interrupt from the mask */
			set_bit(ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED,
					&adreno_dev->priv);
			gpudev->irq->mask &= ~BIT(A5XX_INT_CP_CACHE_FLUSH_TS);
			adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_0_MASK,
					gpudev->irq->mask);

			kgsl_schedule_work(&adreno_dev->irq_storm_work);

			return;
		}
	} else {
		count = 0;
		prev = cur;
	}

	a5xx_preemption_trigger(adreno_dev);
	adreno_dispatcher_schedule(device);
}

static const char *gpmu_int_msg[32] = {
	[FW_INTR_INFO] = "FW_INTR_INFO",
	[LLM_ACK_ERR_INTR] = "LLM_ACK_ERR_INTR",
	[ISENS_TRIM_ERR_INTR] = "ISENS_TRIM_ERR_INTR",
	[ISENS_ERR_INTR] = "ISENS_ERR_INTR",
	[ISENS_IDLE_ERR_INTR] = "ISENS_IDLE_ERR_INTR",
	[ISENS_PWR_ON_ERR_INTR] = "ISENS_PWR_ON_ERR_INTR",
	[6 ... 30] = "",
	[WDOG_EXPITED] = "WDOG_EXPITED",
};

static void a5xx_gpmu_int_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int reg, i;

	kgsl_regread(device, A5XX_GPMU_RBBM_INTR_INFO, &reg);

	if (reg & (~VALID_GPMU_IRQ)) {
		KGSL_DRV_CRIT_RATELIMIT(device,
			"GPMU: Unknown IRQ mask 0x%08lx in 0x%08x\n",
			reg & (~VALID_GPMU_IRQ), reg);
	}

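	/*
	 * Scan every status bit; each case below can only match when the
	 * corresponding bit is set, since (reg & BIT(i)) is either 0 or
	 * BIT(i).
	 */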
	for (i = 0; i < 32; i++)
		switch (reg & BIT(i)) {
		case BIT(WDOG_EXPITED):
			if (test_and_clear_bit(ADRENO_DEVICE_GPMU_INITIALIZED,
					&adreno_dev->priv)) {
				/* Stop GPMU */
				kgsl_regwrite(device,
						A5XX_GPMU_CM3_SYSRESET, 1);
				kgsl_schedule_work(&adreno_dev->gpmu_work);
			}
			/* fallthrough */
		case BIT(FW_INTR_INFO):
		case BIT(LLM_ACK_ERR_INTR):
		case BIT(ISENS_TRIM_ERR_INTR):
		case BIT(ISENS_ERR_INTR):
		case BIT(ISENS_IDLE_ERR_INTR):
		case BIT(ISENS_PWR_ON_ERR_INTR):
			KGSL_DRV_CRIT_RATELIMIT(device,
				"GPMU: interrupt %s(%08lx)\n",
				gpmu_int_msg[i],
				BIT(i));
			break;
		}
}

/*
 * a5xx_gpc_err_int_callback() - ISR for GPC error interrupts
 * @adreno_dev: Pointer to device
 * @bit: Interrupt bit
 */
static void a5xx_gpc_err_int_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/*
	 * A GPC error is typically the result of a SW programming mistake.
	 * Force a GPU fault for this interrupt so that we can debug it with
	 * the help of a register dump.
	 */

	KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: GPC error\n");
	adreno_irqctrl(adreno_dev, 0);

	/* Trigger a fault in the dispatcher - this will effect a restart */
	adreno_set_gpu_fault(adreno_dev, ADRENO_SOFT_FAULT);
	adreno_dispatcher_schedule(device);
}

#define A5XX_INT_MASK \
	((1 << A5XX_INT_RBBM_AHB_ERROR) | \
	 (1 << A5XX_INT_RBBM_TRANSFER_TIMEOUT) | \
	 (1 << A5XX_INT_RBBM_ME_MS_TIMEOUT) | \
	 (1 << A5XX_INT_RBBM_PFP_MS_TIMEOUT) | \
	 (1 << A5XX_INT_RBBM_ETS_MS_TIMEOUT) | \
	 (1 << A5XX_INT_RBBM_ATB_ASYNC_OVERFLOW) | \
	 (1 << A5XX_INT_RBBM_GPC_ERROR) | \
	 (1 << A5XX_INT_CP_HW_ERROR) | \
	 (1 << A5XX_INT_CP_CACHE_FLUSH_TS) | \
	 (1 << A5XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
	 (1 << A5XX_INT_UCHE_OOB_ACCESS) | \
	 (1 << A5XX_INT_UCHE_TRAP_INTR) | \
	 (1 << A5XX_INT_CP_SW) | \
	 (1 << A5XX_INT_GPMU_FIRMWARE) | \
	 (1 << A5XX_INT_GPMU_VOLTAGE_DROOP))

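/*
 * This default mask is handed to the adreno interrupt layer through
 * a5xx_irq below and programmed into A5XX_RBBM_INT_0_MASK; individual
 * bits may be dropped at runtime, as a5xx_cp_callback() does for
 * CP_CACHE_FLUSH_TS during an interrupt storm.
 */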
static struct adreno_irq_funcs a5xx_irq_funcs[32] = {
	ADRENO_IRQ_CALLBACK(NULL), /* 0 - RBBM_GPU_IDLE */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 1 - RBBM_AHB_ERROR */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 2 - RBBM_TRANSFER_TIMEOUT */
	/* 3 - RBBM_ME_MASTER_SPLIT_TIMEOUT */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback),
	/* 4 - RBBM_PFP_MASTER_SPLIT_TIMEOUT */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback),
	/* 5 - RBBM_ETS_MASTER_SPLIT_TIMEOUT */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback),
	/* 6 - RBBM_ATB_ASYNC_OVERFLOW */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback),
	ADRENO_IRQ_CALLBACK(a5xx_gpc_err_int_callback), /* 7 - GPC_ERR */
	ADRENO_IRQ_CALLBACK(a5xx_preempt_callback), /* 8 - CP_SW */
	ADRENO_IRQ_CALLBACK(a5xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */
	/* 10 - CP_CCU_FLUSH_DEPTH_TS */
	ADRENO_IRQ_CALLBACK(NULL),
	/* 11 - CP_CCU_FLUSH_COLOR_TS */
	ADRENO_IRQ_CALLBACK(NULL),
	/* 12 - CP_CCU_RESOLVE_TS */
	ADRENO_IRQ_CALLBACK(NULL),
	ADRENO_IRQ_CALLBACK(NULL), /* 13 - CP_IB2_INT */
	ADRENO_IRQ_CALLBACK(NULL), /* 14 - CP_IB1_INT */
	ADRENO_IRQ_CALLBACK(NULL), /* 15 - CP_RB_INT */
	/* 16 - CP_UNUSED_1 */
	ADRENO_IRQ_CALLBACK(NULL),
	ADRENO_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 18 - CP_WT_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 19 - UNKNOWN_1 */
	ADRENO_IRQ_CALLBACK(a5xx_cp_callback), /* 20 - CP_CACHE_FLUSH_TS */
	/* 21 - UNUSED_2 */
	ADRENO_IRQ_CALLBACK(NULL),
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 22 - RBBM_ATB_BUS_OVERFLOW */
	/* 23 - MISC_HANG_DETECT */
	ADRENO_IRQ_CALLBACK(adreno_hang_int_callback),
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 24 - UCHE_OOB_ACCESS */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 25 - UCHE_TRAP_INTR */
	ADRENO_IRQ_CALLBACK(NULL), /* 26 - DEBBUS_INTR_0 */
	ADRENO_IRQ_CALLBACK(NULL), /* 27 - DEBBUS_INTR_1 */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 28 - GPMU_VOLTAGE_DROOP */
	ADRENO_IRQ_CALLBACK(a5xx_gpmu_int_callback), /* 29 - GPMU_FIRMWARE */
	ADRENO_IRQ_CALLBACK(NULL), /* 30 - ISDB_CPU_IRQ */
	ADRENO_IRQ_CALLBACK(NULL), /* 31 - ISDB_UNDER_DEBUG */
};

static struct adreno_irq a5xx_irq = {
	.funcs = a5xx_irq_funcs,
	.mask = A5XX_INT_MASK,
};

/*
 * Default sizes for the CP queues on A5xx targets. Overwrite these
 * values in the platform_setup function for an A5xx derivative if its
 * sizes differ.
 */
static struct adreno_snapshot_sizes a5xx_snap_sizes = {
	.cp_pfp = 36,
	.cp_me = 29,
	.cp_meq = 64,
	.cp_merciu = 64,
	.roq = 512,
};
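/*
 * A hypothetical platform_setup override for a derivative with smaller
 * CP queues would simply rewrite the fields before they are used, e.g.:
 *
 *	a5xx_snap_sizes.cp_meq = 32;
 *
 * (the value 32 is illustrative only).
 */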

static struct adreno_snapshot_data a5xx_snapshot_data = {
	.sect_sizes = &a5xx_snap_sizes,
};

static struct adreno_coresight_register a5xx_coresight_registers[] = {
	{ A5XX_RBBM_CFG_DBGBUS_SEL_A },
	{ A5XX_RBBM_CFG_DBGBUS_SEL_B },
	{ A5XX_RBBM_CFG_DBGBUS_SEL_C },
	{ A5XX_RBBM_CFG_DBGBUS_SEL_D },
	{ A5XX_RBBM_CFG_DBGBUS_CNTLT },
	{ A5XX_RBBM_CFG_DBGBUS_CNTLM },
	{ A5XX_RBBM_CFG_DBGBUS_OPL },
	{ A5XX_RBBM_CFG_DBGBUS_OPE },
	{ A5XX_RBBM_CFG_DBGBUS_IVTL_0 },
	{ A5XX_RBBM_CFG_DBGBUS_IVTL_1 },
	{ A5XX_RBBM_CFG_DBGBUS_IVTL_2 },
	{ A5XX_RBBM_CFG_DBGBUS_IVTL_3 },
	{ A5XX_RBBM_CFG_DBGBUS_MASKL_0 },
	{ A5XX_RBBM_CFG_DBGBUS_MASKL_1 },
	{ A5XX_RBBM_CFG_DBGBUS_MASKL_2 },
	{ A5XX_RBBM_CFG_DBGBUS_MASKL_3 },
	{ A5XX_RBBM_CFG_DBGBUS_BYTEL_0 },
	{ A5XX_RBBM_CFG_DBGBUS_BYTEL_1 },
	{ A5XX_RBBM_CFG_DBGBUS_IVTE_0 },
	{ A5XX_RBBM_CFG_DBGBUS_IVTE_1 },
	{ A5XX_RBBM_CFG_DBGBUS_IVTE_2 },
	{ A5XX_RBBM_CFG_DBGBUS_IVTE_3 },
	{ A5XX_RBBM_CFG_DBGBUS_MASKE_0 },
	{ A5XX_RBBM_CFG_DBGBUS_MASKE_1 },
	{ A5XX_RBBM_CFG_DBGBUS_MASKE_2 },
	{ A5XX_RBBM_CFG_DBGBUS_MASKE_3 },
	{ A5XX_RBBM_CFG_DBGBUS_NIBBLEE },
	{ A5XX_RBBM_CFG_DBGBUS_PTRC0 },
	{ A5XX_RBBM_CFG_DBGBUS_PTRC1 },
	{ A5XX_RBBM_CFG_DBGBUS_LOADREG },
	{ A5XX_RBBM_CFG_DBGBUS_IDX },
	{ A5XX_RBBM_CFG_DBGBUS_CLRC },
	{ A5XX_RBBM_CFG_DBGBUS_LOADIVT },
	{ A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC },
	{ A5XX_RBBM_CFG_DBGBUS_OVER },
	{ A5XX_RBBM_CFG_DBGBUS_COUNT0 },
	{ A5XX_RBBM_CFG_DBGBUS_COUNT1 },
	{ A5XX_RBBM_CFG_DBGBUS_COUNT2 },
	{ A5XX_RBBM_CFG_DBGBUS_COUNT3 },
	{ A5XX_RBBM_CFG_DBGBUS_COUNT4 },
	{ A5XX_RBBM_CFG_DBGBUS_COUNT5 },
	{ A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR },
	{ A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 },
	{ A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 },
	{ A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 },
	{ A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 },
	{ A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 },
	{ A5XX_RBBM_CFG_DBGBUS_MISR0 },
	{ A5XX_RBBM_CFG_DBGBUS_MISR1 },
	{ A5XX_RBBM_AHB_DBG_CNTL },
	{ A5XX_RBBM_READ_AHB_THROUGH_DBG },
	{ A5XX_RBBM_DBG_LO_HI_GPIO },
	{ A5XX_RBBM_EXT_TRACE_BUS_CNTL },
	{ A5XX_RBBM_EXT_VBIF_DBG_CNTL },
};

static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_a, &a5xx_coresight_registers[0]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_b, &a5xx_coresight_registers[1]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_c, &a5xx_coresight_registers[2]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_d, &a5xx_coresight_registers[3]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_cntlt, &a5xx_coresight_registers[4]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_cntlm, &a5xx_coresight_registers[5]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_opl, &a5xx_coresight_registers[6]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ope, &a5xx_coresight_registers[7]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_0, &a5xx_coresight_registers[8]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_1, &a5xx_coresight_registers[9]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_2, &a5xx_coresight_registers[10]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_3, &a5xx_coresight_registers[11]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_0, &a5xx_coresight_registers[12]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_1, &a5xx_coresight_registers[13]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_2, &a5xx_coresight_registers[14]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_3, &a5xx_coresight_registers[15]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_bytel_0, &a5xx_coresight_registers[16]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_bytel_1, &a5xx_coresight_registers[17]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_0, &a5xx_coresight_registers[18]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_1, &a5xx_coresight_registers[19]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_2, &a5xx_coresight_registers[20]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_3, &a5xx_coresight_registers[21]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_0, &a5xx_coresight_registers[22]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_1, &a5xx_coresight_registers[23]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_2, &a5xx_coresight_registers[24]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_3, &a5xx_coresight_registers[25]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_nibblee, &a5xx_coresight_registers[26]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ptrc0, &a5xx_coresight_registers[27]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ptrc1, &a5xx_coresight_registers[28]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_loadreg, &a5xx_coresight_registers[29]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_idx, &a5xx_coresight_registers[30]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_clrc, &a5xx_coresight_registers[31]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_loadivt, &a5xx_coresight_registers[32]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_event_logic,
				&a5xx_coresight_registers[33]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_over, &a5xx_coresight_registers[34]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_count0, &a5xx_coresight_registers[35]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_count1, &a5xx_coresight_registers[36]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_count2, &a5xx_coresight_registers[37]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_count3, &a5xx_coresight_registers[38]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_count4, &a5xx_coresight_registers[39]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_count5, &a5xx_coresight_registers[40]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_trace_addr,
				&a5xx_coresight_registers[41]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_trace_buf0,
				&a5xx_coresight_registers[42]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_trace_buf1,
				&a5xx_coresight_registers[43]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_trace_buf2,
				&a5xx_coresight_registers[44]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_trace_buf3,
				&a5xx_coresight_registers[45]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_trace_buf4,
				&a5xx_coresight_registers[46]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_misr0, &a5xx_coresight_registers[47]);
static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_misr1, &a5xx_coresight_registers[48]);
static ADRENO_CORESIGHT_ATTR(ahb_dbg_cntl, &a5xx_coresight_registers[49]);
static ADRENO_CORESIGHT_ATTR(read_ahb_through_dbg,
				&a5xx_coresight_registers[50]);
static ADRENO_CORESIGHT_ATTR(dbg_lo_hi_gpio, &a5xx_coresight_registers[51]);
static ADRENO_CORESIGHT_ATTR(ext_trace_bus_cntl, &a5xx_coresight_registers[52]);
static ADRENO_CORESIGHT_ATTR(ext_vbif_dbg_cntl, &a5xx_coresight_registers[53]);

static struct attribute *a5xx_coresight_attrs[] = {
	&coresight_attr_cfg_dbgbus_sel_a.attr.attr,
	&coresight_attr_cfg_dbgbus_sel_b.attr.attr,
	&coresight_attr_cfg_dbgbus_sel_c.attr.attr,
	&coresight_attr_cfg_dbgbus_sel_d.attr.attr,
	&coresight_attr_cfg_dbgbus_cntlt.attr.attr,
	&coresight_attr_cfg_dbgbus_cntlm.attr.attr,
	&coresight_attr_cfg_dbgbus_opl.attr.attr,
	&coresight_attr_cfg_dbgbus_ope.attr.attr,
	&coresight_attr_cfg_dbgbus_ivtl_0.attr.attr,
	&coresight_attr_cfg_dbgbus_ivtl_1.attr.attr,
	&coresight_attr_cfg_dbgbus_ivtl_2.attr.attr,
	&coresight_attr_cfg_dbgbus_ivtl_3.attr.attr,
	&coresight_attr_cfg_dbgbus_maskl_0.attr.attr,
	&coresight_attr_cfg_dbgbus_maskl_1.attr.attr,
	&coresight_attr_cfg_dbgbus_maskl_2.attr.attr,
	&coresight_attr_cfg_dbgbus_maskl_3.attr.attr,
	&coresight_attr_cfg_dbgbus_bytel_0.attr.attr,
	&coresight_attr_cfg_dbgbus_bytel_1.attr.attr,
	&coresight_attr_cfg_dbgbus_ivte_0.attr.attr,
	&coresight_attr_cfg_dbgbus_ivte_1.attr.attr,
	&coresight_attr_cfg_dbgbus_ivte_2.attr.attr,
	&coresight_attr_cfg_dbgbus_ivte_3.attr.attr,
	&coresight_attr_cfg_dbgbus_maske_0.attr.attr,
	&coresight_attr_cfg_dbgbus_maske_1.attr.attr,
	&coresight_attr_cfg_dbgbus_maske_2.attr.attr,
	&coresight_attr_cfg_dbgbus_maske_3.attr.attr,
	&coresight_attr_cfg_dbgbus_nibblee.attr.attr,
	&coresight_attr_cfg_dbgbus_ptrc0.attr.attr,
	&coresight_attr_cfg_dbgbus_ptrc1.attr.attr,
	&coresight_attr_cfg_dbgbus_loadreg.attr.attr,
	&coresight_attr_cfg_dbgbus_idx.attr.attr,
	&coresight_attr_cfg_dbgbus_clrc.attr.attr,
	&coresight_attr_cfg_dbgbus_loadivt.attr.attr,
	&coresight_attr_cfg_dbgbus_event_logic.attr.attr,
	&coresight_attr_cfg_dbgbus_over.attr.attr,
	&coresight_attr_cfg_dbgbus_count0.attr.attr,
	&coresight_attr_cfg_dbgbus_count1.attr.attr,
	&coresight_attr_cfg_dbgbus_count2.attr.attr,
	&coresight_attr_cfg_dbgbus_count3.attr.attr,
	&coresight_attr_cfg_dbgbus_count4.attr.attr,
	&coresight_attr_cfg_dbgbus_count5.attr.attr,
	&coresight_attr_cfg_dbgbus_trace_addr.attr.attr,
	&coresight_attr_cfg_dbgbus_trace_buf0.attr.attr,
	&coresight_attr_cfg_dbgbus_trace_buf1.attr.attr,
	&coresight_attr_cfg_dbgbus_trace_buf2.attr.attr,
	&coresight_attr_cfg_dbgbus_trace_buf3.attr.attr,
	&coresight_attr_cfg_dbgbus_trace_buf4.attr.attr,
	&coresight_attr_cfg_dbgbus_misr0.attr.attr,
	&coresight_attr_cfg_dbgbus_misr1.attr.attr,
	&coresight_attr_ahb_dbg_cntl.attr.attr,
	&coresight_attr_read_ahb_through_dbg.attr.attr,
	&coresight_attr_dbg_lo_hi_gpio.attr.attr,
	&coresight_attr_ext_trace_bus_cntl.attr.attr,
	&coresight_attr_ext_vbif_dbg_cntl.attr.attr,
	NULL,
};

static const struct attribute_group a5xx_coresight_group = {
	.attrs = a5xx_coresight_attrs,
};

static const struct attribute_group *a5xx_coresight_groups[] = {
	&a5xx_coresight_group,
	NULL,
};

static struct adreno_coresight a5xx_coresight = {
	.registers = a5xx_coresight_registers,
	.count = ARRAY_SIZE(a5xx_coresight_registers),
	.groups = a5xx_coresight_groups,
};

struct adreno_gpudev adreno_a5xx_gpudev = {
	.reg_offsets = &a5xx_reg_offsets,
	.int_bits = a5xx_int_bits,
	.ft_perf_counters = a5xx_ft_perf_counters,
	.ft_perf_counters_count = ARRAY_SIZE(a5xx_ft_perf_counters),
	.coresight = {&a5xx_coresight},
	.start = a5xx_start,
	.snapshot = a5xx_snapshot,
	.irq = &a5xx_irq,
	.snapshot_data = &a5xx_snapshot_data,
	.irq_trace = trace_kgsl_a5xx_irq_status,
	.num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
	.platform_setup = a5xx_platform_setup,
	.init = a5xx_init,
	.remove = a5xx_remove,
	.rb_start = a5xx_rb_start,
	.microcode_read = a5xx_microcode_read,
	.perfcounters = &a5xx_perfcounters,
	.vbif_xin_halt_ctrl0_mask = A5XX_VBIF_XIN_HALT_CTRL0_MASK,
	.is_sptp_idle = a5xx_is_sptp_idle,
	.regulator_enable = a5xx_regulator_enable,
	.regulator_disable = a5xx_regulator_disable,
	.pwrlevel_change_settings = a5xx_pwrlevel_change_settings,
	.read_throttling_counters = a5xx_read_throttling_counters,
	.count_throttles = a5xx_count_throttles,
	.enable_pwr_counters = a5xx_enable_pwr_counters,
	.preemption_pre_ibsubmit = a5xx_preemption_pre_ibsubmit,
	.preemption_yield_enable = a5xx_preemption_yield_enable,
	.preemption_post_ibsubmit = a5xx_preemption_post_ibsubmit,
	.preemption_init = a5xx_preemption_init,
	.preemption_schedule = a5xx_preemption_schedule,
	.enable_64bit = a5xx_enable_64bit,
	.clk_set_options = a5xx_clk_set_options,
};