/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/pm_opp.h>
#include <linux/jiffies.h>

#include "adreno.h"
#include "a6xx_reg.h"
#include "adreno_a6xx.h"
#include "adreno_cp_parser.h"
#include "adreno_trace.h"
#include "adreno_pm4types.h"
#include "adreno_perfcounter.h"
#include "adreno_ringbuffer.h"
#include "adreno_llc.h"
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl.h"
#include "kgsl_gmu.h"
#include "kgsl_trace.h"

#define MIN_HBB		13

#define A6XX_LLC_NUM_GPU_SCIDS		5
#define A6XX_GPU_LLC_SCID_NUM_BITS	5
#define A6XX_GPU_LLC_SCID_MASK \
	((1 << (A6XX_LLC_NUM_GPU_SCIDS * A6XX_GPU_LLC_SCID_NUM_BITS)) - 1)
#define A6XX_GPUHTW_LLC_SCID_SHIFT	25
#define A6XX_GPUHTW_LLC_SCID_MASK \
	(((1 << A6XX_GPU_LLC_SCID_NUM_BITS) - 1) << A6XX_GPUHTW_LLC_SCID_SHIFT)
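/*
 * Bit layout implied by the two masks above: the five GPU SCIDs are packed
 * five bits each into bits [24:0], while the GPUHTW (hardware pagetable
 * walker) SCID covers bits [29:25]; presumably both fields live in the same
 * LLC SCID configuration value.
 */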

#define A6XX_GPU_CX_REG_BASE		0x509E000
#define A6XX_GPU_CX_REG_SIZE		0x1000

#define GPU_LIMIT_THRESHOLD_ENABLE	BIT(31)

static int _load_gmu_firmware(struct kgsl_device *device);

static const struct adreno_vbif_data a630_vbif[] = {
	{A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009},
	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
	{0, 0},
};

static const struct adreno_vbif_data a615_gbif[] = {
	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
	{0, 0},
};

static const struct adreno_vbif_platform a6xx_vbif_platforms[] = {
	{ adreno_is_a630, a630_vbif },
	{ adreno_is_a615, a615_gbif },
};

struct kgsl_hwcg_reg {
	unsigned int off;
	unsigned int val;
};
static const struct kgsl_hwcg_reg a630_hwcg_regs[] = {
	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
	{A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
	{A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
	{A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
	{A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
	{A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
	{A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
	{A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
	{A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
	{A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
};

static const struct kgsl_hwcg_reg a615_hwcg_regs[] = {
	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
};

static const struct {
	int (*devfunc)(struct adreno_device *adreno_dev);
	const struct kgsl_hwcg_reg *regs;
	unsigned int count;
} a6xx_hwcg_registers[] = {
	{adreno_is_a630, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)},
	{adreno_is_a615, a615_hwcg_regs, ARRAY_SIZE(a615_hwcg_regs)},
};

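/*
 * Each entry below is programmed into one A6XX_CP_PROTECT_REG slot by
 * a6xx_protect_init(): the base register offset, the count of registers in
 * the protected range (written at bit 18), and whether reads are blocked as
 * well (bit 31).
 */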
static struct a6xx_protected_regs {
	unsigned int base;
	unsigned int count;
	int read_protect;
} a6xx_protected_regs_group[] = {
	{ 0x600, 0x51, 0 },
	{ 0xAE50, 0x2, 1 },
	{ 0x9624, 0x13, 1 },
	{ 0x8630, 0x8, 1 },
	{ 0x9E70, 0x1, 1 },
	{ 0x9E78, 0x187, 1 },
	{ 0xF000, 0x810, 1 },
	{ 0xFC00, 0x3, 0 },
	{ 0x50E, 0x0, 1 },
	{ 0x50F, 0x0, 0 },
	{ 0x510, 0x0, 1 },
	{ 0x0, 0x4F9, 0 },
	{ 0x501, 0xA, 0 },
	{ 0x511, 0x44, 0 },
	{ 0xE00, 0x1, 1 },
	{ 0xE03, 0xB, 1 },
	{ 0x8E00, 0x0, 1 },
	{ 0x8E50, 0xF, 1 },
	{ 0xBE02, 0x0, 1 },
	{ 0xBE20, 0x11F3, 1 },
	{ 0x800, 0x82, 1 },
	{ 0x8A0, 0x8, 1 },
	{ 0x8AB, 0x19, 1 },
	{ 0x900, 0x4D, 1 },
	{ 0x98D, 0x76, 1 },
	{ 0x8D0, 0x23, 0 },
	{ 0x980, 0x4, 0 },
	{ 0xA630, 0x0, 1 },
};

/* IFPC & Preemption static powerup restore list */
static struct reg_list_pair {
	uint32_t offset;
	uint32_t val;
} a6xx_pwrup_reglist[] = {
	{ A6XX_VSC_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_GRAS_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_RB_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_PC_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_HLSQ_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_VFD_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_VPC_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_UCHE_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_SP_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_TPL1_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_UCHE_WRITE_RANGE_MAX_LO, 0x0 },
	{ A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0 },
	{ A6XX_UCHE_TRAP_BASE_LO, 0x0 },
	{ A6XX_UCHE_TRAP_BASE_HI, 0x0 },
	{ A6XX_UCHE_WRITE_THRU_BASE_LO, 0x0 },
	{ A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0 },
	{ A6XX_UCHE_GMEM_RANGE_MIN_LO, 0x0 },
	{ A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0 },
	{ A6XX_UCHE_GMEM_RANGE_MAX_LO, 0x0 },
	{ A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0 },
	{ A6XX_UCHE_FILTER_CNTL, 0x0 },
	{ A6XX_UCHE_CACHE_WAYS, 0x0 },
	{ A6XX_UCHE_MODE_CNTL, 0x0 },
	{ A6XX_RB_NC_MODE_CNTL, 0x0 },
	{ A6XX_TPL1_NC_MODE_CNTL, 0x0 },
	{ A6XX_SP_NC_MODE_CNTL, 0x0 },
	{ A6XX_PC_DBG_ECO_CNTL, 0x0 },
	{ A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x0 },
};

/* IFPC only static powerup restore list */
static struct reg_list_pair a6xx_ifpc_pwrup_reglist[] = {
	{ A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x0 },
	{ A6XX_CP_CHICKEN_DBG, 0x0 },
	{ A6XX_CP_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_CP_DBG_ECO_CNTL, 0x0 },
	{ A6XX_CP_PROTECT_CNTL, 0x0 },
	{ A6XX_CP_PROTECT_REG, 0x0 },
	{ A6XX_CP_PROTECT_REG+1, 0x0 },
	{ A6XX_CP_PROTECT_REG+2, 0x0 },
	{ A6XX_CP_PROTECT_REG+3, 0x0 },
	{ A6XX_CP_PROTECT_REG+4, 0x0 },
	{ A6XX_CP_PROTECT_REG+5, 0x0 },
	{ A6XX_CP_PROTECT_REG+6, 0x0 },
	{ A6XX_CP_PROTECT_REG+7, 0x0 },
	{ A6XX_CP_PROTECT_REG+8, 0x0 },
	{ A6XX_CP_PROTECT_REG+9, 0x0 },
	{ A6XX_CP_PROTECT_REG+10, 0x0 },
	{ A6XX_CP_PROTECT_REG+11, 0x0 },
	{ A6XX_CP_PROTECT_REG+12, 0x0 },
	{ A6XX_CP_PROTECT_REG+13, 0x0 },
	{ A6XX_CP_PROTECT_REG+14, 0x0 },
	{ A6XX_CP_PROTECT_REG+15, 0x0 },
	{ A6XX_CP_PROTECT_REG+16, 0x0 },
	{ A6XX_CP_PROTECT_REG+17, 0x0 },
	{ A6XX_CP_PROTECT_REG+18, 0x0 },
	{ A6XX_CP_PROTECT_REG+19, 0x0 },
	{ A6XX_CP_PROTECT_REG+20, 0x0 },
	{ A6XX_CP_PROTECT_REG+21, 0x0 },
	{ A6XX_CP_PROTECT_REG+22, 0x0 },
	{ A6XX_CP_PROTECT_REG+23, 0x0 },
	{ A6XX_CP_PROTECT_REG+24, 0x0 },
	{ A6XX_CP_PROTECT_REG+25, 0x0 },
	{ A6XX_CP_PROTECT_REG+26, 0x0 },
	{ A6XX_CP_PROTECT_REG+27, 0x0 },
	{ A6XX_CP_PROTECT_REG+28, 0x0 },
	{ A6XX_CP_PROTECT_REG+29, 0x0 },
	{ A6XX_CP_PROTECT_REG+30, 0x0 },
	{ A6XX_CP_PROTECT_REG+31, 0x0 },
	{ A6XX_CP_AHB_CNTL, 0x0 },
};

static struct reg_list_pair a615_ifpc_pwrup_reglist[] = {
	{ A6XX_UCHE_GBIF_GX_CONFIG, 0x0 },
};
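/*
 * On a615 this extra entry is copied in ahead of the common IFPC list when
 * the powerup register list is patched; see a6xx_patch_pwrup_reglist() below.
 */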

static void _update_always_on_regs(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int *const regs = gpudev->reg_offsets->offsets;

	regs[ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO] =
		A6XX_CP_ALWAYS_ON_COUNTER_LO;
	regs[ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI] =
		A6XX_CP_ALWAYS_ON_COUNTER_HI;
}

static void a6xx_pwrup_reglist_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
		PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
		"powerup_register_list")) {
		adreno_dev->pwrup_reglist.gpuaddr = 0;
		return;
	}

	kgsl_sharedmem_set(device, &adreno_dev->pwrup_reglist, 0, 0,
		PAGE_SIZE);
}

static void a6xx_init(struct adreno_device *adreno_dev)
{
	a6xx_crashdump_init(adreno_dev);

	/*
	 * If the GMU is not enabled, rewrite the offset for the always on
	 * counters to point to the CP always on instead of GMU always on
	 */
	if (!kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
		_update_always_on_regs(adreno_dev);

	a6xx_pwrup_reglist_init(adreno_dev);
}

/**
 * a6xx_protect_init() - Initializes register protection on a6xx
 * @adreno_dev: Pointer to the adreno device
 * Performs register writes to enable protected access to sensitive
 * registers
 */
static void a6xx_protect_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_protected_registers *mmu_prot =
		kgsl_mmu_get_prot_regs(&device->mmu);
	int i, num_sets;
	int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
	int max_sets = adreno_dev->gpucore->num_protected_regs;
	unsigned int mmu_base = 0, mmu_range = 0, cur_range;

	/* enable access protection to privileged registers */
	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000003);

	if (mmu_prot) {
		mmu_base = mmu_prot->base;
		mmu_range = 1 << mmu_prot->range;
		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
	}

	if (req_sets > max_sets)
		WARN(1, "Size exceeds the num of protection regs available\n");

	/* Protect GPU registers */
	num_sets = min_t(unsigned int,
		ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
	for (i = 0; i < num_sets; i++) {
		struct a6xx_protected_regs *regs =
			&a6xx_protected_regs_group[i];

		kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
			regs->base | (regs->count << 18) |
			(regs->read_protect << 31));
	}

	/* Protect MMU registers */
	if (mmu_prot) {
		while ((i < max_sets) && (mmu_range > 0)) {
			cur_range = min_t(unsigned int, mmu_range,
				0x2000);
			kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
				mmu_base | ((cur_range - 1) << 18) | (1 << 31));

			mmu_base += cur_range;
			mmu_range -= cur_range;
			i++;
		}
	}
}

static void a6xx_enable_64bit(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_regwrite(device, A6XX_CP_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VSC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_RB_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_PC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VFD_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VPC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_SP_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}

static inline unsigned int
__get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev)
{
	if (adreno_is_a615(adreno_dev))
		return 0x8AA8AA82;
	else
		return 0x8AA8AA02;
}

static inline unsigned int
__get_gmu_ao_cgc_mode_cntl(struct adreno_device *adreno_dev)
{
	if (adreno_is_a615(adreno_dev))
		return 0x00000222;
	else
		return 0x00020222;
}

static inline unsigned int
__get_gmu_ao_cgc_delay_cntl(struct adreno_device *adreno_dev)
{
	if (adreno_is_a615(adreno_dev))
		return 0x00000111;
	else
		return 0x00010111;
}

static inline unsigned int
__get_gmu_ao_cgc_hyst_cntl(struct adreno_device *adreno_dev)
{
	if (adreno_is_a615(adreno_dev))
		return 0x00000555;
	else
		return 0x00005555;
}

static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct kgsl_hwcg_reg *regs;
	unsigned int value;
	int i, j;

	if (!test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag))
		on = false;

	if (kgsl_gmu_isenabled(device)) {
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
			on ? __get_gmu_ao_cgc_mode_cntl(adreno_dev) : 0);
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
			on ? __get_gmu_ao_cgc_delay_cntl(adreno_dev) : 0);
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
			on ? __get_gmu_ao_cgc_hyst_cntl(adreno_dev) : 0);
	}

	kgsl_regread(device, A6XX_RBBM_CLOCK_CNTL, &value);

	if (value == __get_rbbm_clock_cntl_on(adreno_dev) && on)
		return;

	if (value == 0 && !on)
		return;

	for (i = 0; i < ARRAY_SIZE(a6xx_hwcg_registers); i++) {
		if (a6xx_hwcg_registers[i].devfunc(adreno_dev))
			break;
	}

	if (i == ARRAY_SIZE(a6xx_hwcg_registers))
		return;

	regs = a6xx_hwcg_registers[i].regs;

	/* Disable SP clock before programming HWCG registers */
	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

	for (j = 0; j < a6xx_hwcg_registers[i].count; j++)
		kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);

	/* Enable SP clock */
	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

	/* enable top level HWCG */
	kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL,
		on ? __get_rbbm_clock_cntl_on(adreno_dev) : 0);
}

#define LM_DEFAULT_LIMIT	6000

static uint32_t lm_limit(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (adreno_dev->lm_limit)
		return adreno_dev->lm_limit;

	if (of_property_read_u32(device->pdev->dev.of_node, "qcom,lm-limit",
		&adreno_dev->lm_limit))
		adreno_dev->lm_limit = LM_DEFAULT_LIMIT;

	return adreno_dev->lm_limit;
}

static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
{
	uint32_t i;
	struct cpu_gpu_lock *lock;
	struct reg_list_pair *r;
	uint16_t a615_list_size = 0;

	/* Set up the register values */
	for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_pwrup_reglist); i++) {
		r = &a6xx_ifpc_pwrup_reglist[i];
		kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
	}

	for (i = 0; i < ARRAY_SIZE(a6xx_pwrup_reglist); i++) {
		r = &a6xx_pwrup_reglist[i];
		kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
	}

	if (adreno_is_a615(adreno_dev)) {
		for (i = 0; i < ARRAY_SIZE(a615_ifpc_pwrup_reglist); i++) {
			r = &a615_ifpc_pwrup_reglist[i];
			kgsl_regread(KGSL_DEVICE(adreno_dev),
				r->offset, &r->val);
		}

		a615_list_size = sizeof(a615_ifpc_pwrup_reglist);

		memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
			a615_ifpc_pwrup_reglist, a615_list_size);
	}

	lock = (struct cpu_gpu_lock *) adreno_dev->pwrup_reglist.hostptr;
	lock->flag_ucode = 0;
	lock->flag_kmd = 0;
	lock->turn = 0;

	/*
	 * The overall register list is composed of
	 * 1. Static IFPC-only registers
	 * 2. Static IFPC + preemption registers
	 * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects)
	 *
	 * The CP views the second and third entries as one dynamic list
	 * starting from list_offset. Thus, list_length should be the sum
	 * of all three lists above (of which the third list will start off
	 * empty). And list_offset should be specified as the size in dwords
	 * of the static IFPC-only register list.
	 */
	lock->list_length = (sizeof(a6xx_ifpc_pwrup_reglist) +
		sizeof(a6xx_pwrup_reglist) + a615_list_size) >> 2;
	lock->list_offset = (sizeof(a6xx_ifpc_pwrup_reglist) +
		a615_list_size) >> 2;

	memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
		+ a615_list_size,
		a6xx_ifpc_pwrup_reglist, sizeof(a6xx_ifpc_pwrup_reglist));
	memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
		+ sizeof(a6xx_ifpc_pwrup_reglist) + a615_list_size,
		a6xx_pwrup_reglist, sizeof(a6xx_pwrup_reglist));
}

/*
 * a6xx_start() - Device start
 * @adreno_dev: Pointer to adreno device
 *
 * a6xx device start
 */
static void a6xx_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int bit, mal, mode, glbl_inv;
	unsigned int amsbc = 0;
	static bool patch_reglist;

	/* runtime adjust callbacks based on feature sets */
	if (!kgsl_gmu_isenabled(device))
		/* Legacy idle management if gmu is disabled */
		ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;
	/* enable hardware clockgating */
	a6xx_hwcg_set(adreno_dev, true);

	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM))
		adreno_dev->lm_threshold_count = A6XX_GMU_GENERAL_1;

	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
			ARRAY_SIZE(a6xx_vbif_platforms));

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW))
		kgsl_regwrite(device, A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

	/*
	 * Set UCHE_WRITE_THRU_BASE to the UCHE_TRAP_BASE effectively
	 * disabling L2 bypass
	 */
	kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
	kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
	kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);

	/* Program the GMEM VA range for the UCHE path */
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_LO,
			ADRENO_UCHE_GMEM_BASE);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_LO,
			ADRENO_UCHE_GMEM_BASE +
			adreno_dev->gmem_size - 1);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);

	kgsl_regwrite(device, A6XX_UCHE_FILTER_CNTL, 0x804);
	kgsl_regwrite(device, A6XX_UCHE_CACHE_WAYS, 0x4);

	kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x010000C0);
	kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);

	/* Setting the mem pool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 128);

	/* Setting the primFifo thresholds default values */
	kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));

	/* Set the AHB default slave response to "ERROR" */
	kgsl_regwrite(device, A6XX_CP_AHB_CNTL, 0x1);

	/* Turn on performance counters */
	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1);

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,highest-bank-bit", &bit))
		bit = MIN_HBB;

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,min-access-length", &mal))
		mal = 32;

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,ubwc-mode", &mode))
		mode = 0;

	switch (mode) {
	case KGSL_UBWC_1_0:
		mode = 1;
		break;
	case KGSL_UBWC_2_0:
		mode = 0;
		break;
	case KGSL_UBWC_3_0:
		mode = 0;
		amsbc = 1; /* Only valid for A640 and A680 */
		break;
	default:
		break;
	}

	if (bit >= 13 && bit <= 16)
		bit = (bit - 13) & 0x03;
	else
		bit = 0;

	mal = (mal == 64) ? 1 : 0;

	/* (1 << 29) globalInvFlushFilterDis bit needs to be set for A630 V1 */
	glbl_inv = (adreno_is_a630v1(adreno_dev)) ? 1 : 0;

	kgsl_regwrite(device, A6XX_RB_NC_MODE_CNTL, (amsbc << 4) | (mal << 3) |
			(bit << 1) | mode);
	kgsl_regwrite(device, A6XX_TPL1_NC_MODE_CNTL, (mal << 3) |
			(bit << 1) | mode);
	kgsl_regwrite(device, A6XX_SP_NC_MODE_CNTL, (mal << 3) | (bit << 1) |
			mode);

	kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (glbl_inv << 29) |
			(mal << 23) | (bit << 21));

	/* Set hang detection threshold to 0x1FFFFF * 16 cycles */
	kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
			(1 << 30) | 0x1fffff);

	kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);

	/* Set TWOPASSUSEWFI in A6XX_PC_DBG_ECO_CNTL if requested */
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI))
		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

	/* Enable the GMEM save/restore feature for preemption */
	if (adreno_is_preemption_enabled(adreno_dev))
		kgsl_regwrite(device, A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
			0x1);

	if (!patch_reglist && (adreno_dev->pwrup_reglist.gpuaddr != 0)) {
		a6xx_patch_pwrup_reglist(adreno_dev);
		patch_reglist = true;
	}

	a6xx_preemption_start(adreno_dev);
	a6xx_protect_init(adreno_dev);

	/*
	 * We start LM here because we want all the following to be up
	 * 1. GX HS
	 * 2. SPTPRAC
	 * 3. HFI
	 * At this point, we are guaranteed all.
	 */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
		test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
		int result;
		struct gmu_device *gmu = &device->gmu;
		struct device *dev = &gmu->pdev->dev;

		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD,
			GPU_LIMIT_THRESHOLD_ENABLE | lm_limit(adreno_dev));
		kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL, 0x1);

		gmu->lm_config.lm_type = 1;
		gmu->lm_config.lm_sensor_type = 1;
		gmu->lm_config.throttle_config = 1;
		gmu->lm_config.idle_throttle_en = 0;
		gmu->lm_config.acd_en = 0;
		gmu->bcl_config = 0;
		gmu->lm_dcvs_level = 0;

		result = hfi_send_lmconfig(gmu);
		if (result)
			dev_err(dev, "Failure enabling limits management (%d)\n",
				result);
	}
}

/*
 * a6xx_microcode_load() - Load microcode
 * @adreno_dev: Pointer to adreno device
 */
static int a6xx_microcode_load(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
	uint64_t gpuaddr;
	void *zap;
	int ret = 0;

	gpuaddr = fw->memdesc.gpuaddr;
	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_LO,
			lower_32_bits(gpuaddr));
	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_HI,
			upper_32_bits(gpuaddr));

	/* Load the zap shader firmware through PIL if it's available */
	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
		zap = subsystem_get(adreno_dev->gpucore->zap_name);

		/* Return error if the zap shader cannot be loaded */
		if (IS_ERR_OR_NULL(zap)) {
			ret = (zap == NULL) ? -ENODEV : PTR_ERR(zap);
			zap = NULL;
		} else
			adreno_dev->zap_loaded = 1;
	}

	return ret;
}


/*
 * CP_INIT_MAX_CONTEXT bit tells if multiple hardware contexts can
 * be used at once or if they should be serialized
 */
#define CP_INIT_MAX_CONTEXT BIT(0)

/* Enables register protection mode */
#define CP_INIT_ERROR_DETECTION_CONTROL BIT(1)

/* Header dump information */
#define CP_INIT_HEADER_DUMP BIT(2) /* Reserved */

/* Default Reset states enabled for PFP and ME */
#define CP_INIT_DEFAULT_RESET_STATE BIT(3)

/* Drawcall filter range */
#define CP_INIT_DRAWCALL_FILTER_RANGE BIT(4)

/* Ucode workaround masks */
#define CP_INIT_UCODE_WORKAROUND_MASK BIT(5)

/*
 * Operation mode mask
 *
 * This ordinal provides the option to disable the
 * save/restore of performance counters across preemption.
 */
#define CP_INIT_OPERATION_MODE_MASK BIT(6)

/* Register initialization list */
#define CP_INIT_REGISTER_INIT_LIST BIT(7)

/* Register initialization list with spinlock */
#define CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK BIT(8)

#define CP_INIT_MASK (CP_INIT_MAX_CONTEXT | \
		CP_INIT_ERROR_DETECTION_CONTROL | \
		CP_INIT_HEADER_DUMP | \
		CP_INIT_DEFAULT_RESET_STATE | \
		CP_INIT_UCODE_WORKAROUND_MASK | \
		CP_INIT_OPERATION_MODE_MASK | \
		CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK)

static void _set_ordinals(struct adreno_device *adreno_dev,
		unsigned int *cmds, unsigned int count)
{
	unsigned int *start = cmds;

	/* Enabled ordinal mask */
	*cmds++ = CP_INIT_MASK;

	if (CP_INIT_MASK & CP_INIT_MAX_CONTEXT)
		*cmds++ = 0x00000003;

	if (CP_INIT_MASK & CP_INIT_ERROR_DETECTION_CONTROL)
		*cmds++ = 0x20000000;

	if (CP_INIT_MASK & CP_INIT_HEADER_DUMP) {
		/* Header dump address */
		*cmds++ = 0x00000000;
		/* Header dump enable and dump size */
		*cmds++ = 0x00000000;
	}

	if (CP_INIT_MASK & CP_INIT_DRAWCALL_FILTER_RANGE) {
		/* Start range */
		*cmds++ = 0x00000000;
		/* End range (inclusive) */
		*cmds++ = 0x00000000;
	}

	if (CP_INIT_MASK & CP_INIT_UCODE_WORKAROUND_MASK)
		*cmds++ = 0x00000000;

	if (CP_INIT_MASK & CP_INIT_OPERATION_MODE_MASK)
		*cmds++ = 0x00000002;

	if (CP_INIT_MASK & CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK) {
		uint64_t gpuaddr = adreno_dev->pwrup_reglist.gpuaddr;

		*cmds++ = lower_32_bits(gpuaddr);
		*cmds++ = upper_32_bits(gpuaddr);
		*cmds++ = 0;

	} else if (CP_INIT_MASK & CP_INIT_REGISTER_INIT_LIST) {
		uint64_t gpuaddr = adreno_dev->pwrup_reglist.gpuaddr;

		*cmds++ = lower_32_bits(gpuaddr);
		*cmds++ = upper_32_bits(gpuaddr);
		/* Size is in dwords */
		*cmds++ = (sizeof(a6xx_ifpc_pwrup_reglist) +
			sizeof(a6xx_pwrup_reglist)) >> 2;
	}

	/* Pad rest of the cmds with 0's */
	while ((unsigned int)(cmds - start) < count)
		*cmds++ = 0x0;
}

/*
 * a6xx_send_cp_init() - Initialize ringbuffer
 * @adreno_dev: Pointer to adreno device
 * @rb: Pointer to the ringbuffer of device
 *
 * Submit commands for ME initialization.
 */
static int a6xx_send_cp_init(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb)
{
	unsigned int *cmds;
	int ret;

	cmds = adreno_ringbuffer_allocspace(rb, 12);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);

	*cmds++ = cp_type7_packet(CP_ME_INIT, 11);

	_set_ordinals(adreno_dev, cmds, 11);

	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
	if (ret)
		adreno_spin_idle_debug(adreno_dev,
			"CP initialization failed to idle\n");

	return ret;
}

/*
 * Follow the ME_INIT sequence with a preemption yield to allow the GPU to move
 * to a different ringbuffer, if desired
 */
static int _preemption_init(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb, unsigned int *cmds,
		struct kgsl_context *context)
{
	unsigned int *cmds_orig = cmds;

	/* Turn CP protection OFF */
	*cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
	*cmds++ = 0;

	*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 6);
	*cmds++ = 1;
	cmds += cp_gpuaddr(adreno_dev, cmds,
			rb->preemption_desc.gpuaddr);

	*cmds++ = 2;
	cmds += cp_gpuaddr(adreno_dev, cmds,
			rb->secure_preemption_desc.gpuaddr);

	/* Turn CP protection ON */
	*cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
	*cmds++ = 1;

	*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
	cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
	*cmds++ = 0;
	/* generate interrupt on preemption completion */
	*cmds++ = 0;

	return cmds - cmds_orig;
}

static int a6xx_post_start(struct adreno_device *adreno_dev)
{
	int ret;
	unsigned int *cmds, *start;
	struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!adreno_is_preemption_enabled(adreno_dev))
		return 0;

	cmds = adreno_ringbuffer_allocspace(rb, 42);
	if (IS_ERR(cmds)) {
		KGSL_DRV_ERR(device, "error allocating preemption init cmds");
		return PTR_ERR(cmds);
	}
	start = cmds;

	cmds += _preemption_init(adreno_dev, rb, cmds, NULL);

	rb->_wptr = rb->_wptr - (42 - (cmds - start));

	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
	if (ret)
		adreno_spin_idle_debug(adreno_dev,
			"hw preemption initialization failed to idle\n");

	return ret;
}

/*
 * a6xx_rb_start() - Start the ringbuffer
 * @adreno_dev: Pointer to adreno device
 * @start_type: Warm or cold start
 */
static int a6xx_rb_start(struct adreno_device *adreno_dev,
		unsigned int start_type)
{
	struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
	struct kgsl_device *device = &adreno_dev->dev;
	uint64_t addr;
	int ret;

	addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);

	adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
			ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2).
	 */
	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
			A6XX_CP_RB_CNTL_DEFAULT);

	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
			rb->buffer_desc.gpuaddr);

	ret = a6xx_microcode_load(adreno_dev);
	if (ret)
		return ret;

	/* Clear the SQE_HALT to start the CP engine */
	kgsl_regwrite(device, A6XX_CP_SQE_CNTL, 1);

	ret = a6xx_send_cp_init(adreno_dev, rb);
	if (ret)
		return ret;

	/* GPU comes up in secured mode, make it unsecured by default */
	ret = adreno_set_unsecured_mode(adreno_dev, rb);
	if (ret)
		return ret;

	return a6xx_post_start(adreno_dev);
}

unsigned int a6xx_set_marker(
		unsigned int *cmds, enum adreno_cp_marker_type type)
{
	unsigned int cmd = 0;

	*cmds++ = cp_type7_packet(CP_SET_MARKER, 1);

	/*
	 * Indicate the beginning and end of the IB1 list with a SET_MARKER.
	 * Among other things, this will implicitly enable and disable
	 * preemption respectively. IFPC can also be disabled and enabled
	 * with a SET_MARKER. Bit 8 tells the CP the marker is for IFPC.
	 */
	switch (type) {
	case IFPC_DISABLE:
		cmd = 0x101;
		break;
	case IFPC_ENABLE:
		cmd = 0x100;
		break;
	case IB1LIST_START:
		cmd = 0xD;
		break;
	case IB1LIST_END:
		cmd = 0xE;
		break;
	}

	*cmds++ = cmd;
	return 2;
}
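/*
 * Illustrative usage (not taken from this file): callers advance their
 * command buffer by the returned dword count, e.g.
 *	cmds += a6xx_set_marker(cmds, IFPC_DISABLE);
 */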

static int _load_firmware(struct kgsl_device *device, const char *fwfile,
		struct adreno_firmware *firmware)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
				fwfile, ret);
		return ret;
	}

	ret = kgsl_allocate_global(device, &firmware->memdesc, fw->size - 4,
				KGSL_MEMFLAGS_GPUREADONLY, 0, "ucode");

	if (!ret) {
		memcpy(firmware->memdesc.hostptr, &fw->data[4], fw->size - 4);
		firmware->size = (fw->size - 4) / sizeof(uint32_t);
		firmware->version = *(unsigned int *)&fw->data[4];
	}

	release_firmware(fw);
	return ret;
}

#define RSC_CMD_OFFSET 2
#define PDC_CMD_OFFSET 4
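/*
 * The RSC TCS command registers are spaced two dwords apart and the PDC TCS
 * command registers four dwords apart; these strides are applied below when
 * programming the successive CMD0 MSGID/ADDR/DATA slots.
 */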

static void _regwrite(void __iomem *regbase,
		unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	reg = regbase + (offsetwords << 2);
	__raw_writel(value, reg);
}

/*
 * _load_gmu_rpmh_ucode() - Load the ucode into the GPU PDC/RSC blocks
 * PDC and RSC execute GPU power on/off RPMh sequence
 * @device: Pointer to KGSL device
 */
static void _load_gmu_rpmh_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;

	/* Disable SDE clock gating */
	kgsl_gmu_regwrite(device, A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	kgsl_gmu_regwrite(device, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	kgsl_gmu_regwrite(device, A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	kgsl_gmu_regwrite(device,
			A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET, 0);
	kgsl_gmu_regwrite(device,
			A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET, 0);
	kgsl_gmu_regwrite(device,
			A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET * 2,
			0x80000000);
	kgsl_gmu_regwrite(device,
			A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET * 2,
			0);
	kgsl_gmu_regwrite(device, A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Enable timestamp event for v1 only */
	if (adreno_is_a630v1(adreno_dev))
		kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);

	/* Load RSC sequencer uCode for sleep and wakeup */
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0, 0xA7A506A0);
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xA1E6A6E7);
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xA2E081E1);
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xE9A982E2);
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020E8A8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0, 0xFEBEA1E1);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 1, 0xA5A4A3A2);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 2, 0x8382A6E0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 3, 0xBCE3E284);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 4, 0x002081FC);

	/* Set TCS commands used by PDC sequence for low power modes */
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CONTROL, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD0_DATA, 1);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET, 0x30000);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET, 0x0);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET * 2, 0x10108);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET * 2, 0x30080);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS3_CONTROL, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS3_CMD0_DATA, 2);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET, 0x30000);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET, 0x3);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET * 2, 0x10108);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET * 2, 0x30080);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x3);

	/* Setup GPU PDC */
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_START_ADDR, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();
}

#define GMU_START_TIMEOUT	10	/* ms */
#define GPU_START_TIMEOUT	100	/* ms */
#define GPU_RESET_TIMEOUT	1	/* ms */
#define GPU_RESET_TIMEOUT_US	10	/* us */

/*
 * timed_poll_check() - poll a *GMU* register at the given offset until
 * its value matches the expected value. The function times out and
 * returns after the given duration if the register is not updated
 * as expected.
 *
 * @device: Pointer to KGSL device
 * @offset: Register offset
 * @expected_ret: expected register value that stops polling
 * @timeout: time in milliseconds before the polling is aborted
 * @mask: bitmask to filter register value to match expected_ret
 */
static int timed_poll_check(struct kgsl_device *device,
		unsigned int offset, unsigned int expected_ret,
		unsigned int timeout, unsigned int mask)
{
	unsigned long t;
	unsigned int value;

	t = jiffies + msecs_to_jiffies(timeout);

	do {
		kgsl_gmu_regread(device, offset, &value);
		if ((value & mask) == expected_ret)
			return 0;
		/* Sleep 10us to 100us to reduce unnecessary AHB bus traffic */
		usleep_range(10, 100);
	} while (!time_after(jiffies, t));

	/* Double check one last time */
	kgsl_gmu_regread(device, offset, &value);
	if ((value & mask) == expected_ret)
		return 0;

	return -EINVAL;
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles
 * for main hysteresis. This is the first hysteresis. Here we set it
 * to 0x1680 cycles, or 300 us. The highest 16 bits of this value are
 * the number of XO clock cycles for short hysteresis. This happens
 * after main hysteresis. Here we set it to 0xA cycles, or 0.5 us.
 */
#define GMU_PWR_COL_HYST 0x000A1680
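/*
 * Worked out (assuming the usual 19.2 MHz XO): 0x1680 = 5760 cycles,
 * 5760 / 19.2 MHz = 300 us; 0xA = 10 cycles, 10 / 19.2 MHz ~= 0.5 us.
 */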
Kyle Piefer4edfb6b2017-08-03 16:42:09 -07001310
1311/*
Kyle Pieferb1027b02017-02-10 13:58:58 -08001312 * a6xx_gmu_power_config() - Configure and enable GMU's low power mode
1313 * setting based on ADRENO feature flags.
1314 * @device: Pointer to KGSL device
1315 */
1316static void a6xx_gmu_power_config(struct kgsl_device *device)
1317{
1318 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1319 struct gmu_device *gmu = &device->gmu;
1320
Kyle Pieferd3964162017-04-06 15:44:03 -07001321 /* Configure registers for idle setting. The setting is cumulative */
George Shenc4c74262017-05-11 15:37:34 -07001322
George Shen1f312ab2017-08-01 10:53:50 -07001323 /* Disable GMU WB/RB buffer */
1324 kgsl_gmu_regwrite(device, A6XX_GMU_SYS_BUS_CONFIG, 0x1);
1325
George Shenc4c74262017-05-11 15:37:34 -07001326 kgsl_gmu_regwrite(device,
1327 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9C40400);
1328
Kyle Pieferd3964162017-04-06 15:44:03 -07001329 switch (gmu->idle_level) {
1330 case GPU_HW_MIN_VOLT:
Kyle Pieferdc0706c2017-04-13 13:17:50 -07001331 kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
1332 MIN_BW_ENABLE_MASK);
1333 kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_HYST_CTRL, 0,
1334 MIN_BW_HYST);
Kyle Pieferd3964162017-04-06 15:44:03 -07001335 /* fall through */
1336 case GPU_HW_NAP:
Kyle Pieferdc0706c2017-04-13 13:17:50 -07001337 kgsl_gmu_regrmw(device, A6XX_GMU_GPU_NAP_CTRL, 0,
1338 HW_NAP_ENABLE_MASK);
Kyle Pieferd3964162017-04-06 15:44:03 -07001339 /* fall through */
1340 case GPU_HW_IFPC:
1341 kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
Kyle Piefer4edfb6b2017-08-03 16:42:09 -07001342 GMU_PWR_COL_HYST);
Kyle Pieferdc0706c2017-04-13 13:17:50 -07001343 kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
Kyle Pieferd3964162017-04-06 15:44:03 -07001344 IFPC_ENABLE_MASK);
1345 /* fall through */
1346 case GPU_HW_SPTP_PC:
1347 kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
Kyle Piefer4edfb6b2017-08-03 16:42:09 -07001348 GMU_PWR_COL_HYST);
Kyle Pieferdc0706c2017-04-13 13:17:50 -07001349 kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
Kyle Pieferd3964162017-04-06 15:44:03 -07001350 SPTP_ENABLE_MASK);
1351 /* fall through */
1352 default:
1353 break;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001354 }
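
	/*
	 * Editor's note: because each case above falls through to the next,
	 * choosing e.g. GPU_HW_IFPC also applies the GPU_HW_SPTP_PC
	 * programming, which is what makes the idle setting cumulative.
	 */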
1355
Kyle Piefer3a5ac092017-04-06 16:05:30 -07001356 /* ACD feature enablement */
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07001357 if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
1358 test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
Kyle Pieferdc0706c2017-04-13 13:17:50 -07001359 kgsl_gmu_regrmw(device, A6XX_GMU_BOOT_KMD_LM_HANDSHAKE, 0,
1360 BIT(10));
Kyle Piefer3a5ac092017-04-06 16:05:30 -07001361
Kyle Pieferb1027b02017-02-10 13:58:58 -08001362 /* Enable RPMh GPU client */
1363 if (ADRENO_FEATURE(adreno_dev, ADRENO_RPMH))
Kyle Pieferdc0706c2017-04-13 13:17:50 -07001364 kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
1365 RPMH_ENABLE_MASK);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001366}
1367
1368/*
1369 * a6xx_gmu_start() - Start GMU and wait until FW boot up.
1370 * @device: Pointer to KGSL device
1371 */
1372static int a6xx_gmu_start(struct kgsl_device *device)
1373{
1374 struct gmu_device *gmu = &device->gmu;
1375
1376 /* Write 1 first to make sure the GMU is reset */
1377 kgsl_gmu_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1);
1378
1379 /* Make sure putting in reset doesn't happen after clearing */
1380 wmb();
1381
1382 /* Bring GMU out of reset */
1383 kgsl_gmu_regwrite(device, A6XX_GMU_CM3_SYSRESET, 0);
1384 if (timed_poll_check(device,
1385 A6XX_GMU_CM3_FW_INIT_RESULT,
1386 0xBABEFACE,
1387 GMU_START_TIMEOUT,
1388 0xFFFFFFFF)) {
1389 dev_err(&gmu->pdev->dev, "GMU doesn't boot\n");
1390 return -ETIMEDOUT;
1391 }
1392
1393 return 0;
1394}
1395
1396/*
1397 * a6xx_gmu_hfi_start() - Write registers and start HFI.
1398 * @device: Pointer to KGSL device
1399 */
1400static int a6xx_gmu_hfi_start(struct kgsl_device *device)
1401{
1402 struct gmu_device *gmu = &device->gmu;
1403
Kyle Piefere7b06b42017-04-06 13:53:01 -07001404 kgsl_gmu_regrmw(device, A6XX_GMU_GMU2HOST_INTR_MASK,
1405 HFI_IRQ_MSGQ_MASK, 0);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001406 kgsl_gmu_regwrite(device, A6XX_GMU_HFI_CTRL_INIT, 1);
1407
1408 if (timed_poll_check(device,
1409 A6XX_GMU_HFI_CTRL_STATUS,
1410 BIT(0),
1411 GMU_START_TIMEOUT,
1412 BIT(0))) {
1413 dev_err(&gmu->pdev->dev, "GMU HFI init failed\n");
1414 return -ETIMEDOUT;
1415 }
1416
1417 return 0;
1418}
1419
1420/*
1421 * a6xx_oob_set() - Set OOB interrupt to GMU.
1422 * @adreno_dev: Pointer to adreno device
1423 * @set_mask: set_mask is a bitmask that defines a set of OOB
1424 * interrupts to trigger.
1425 * @check_mask: check_mask is a bitmask that provides a set of
1426 * OOB ACK bits. check_mask usually matches set_mask to
1427 * ensure OOBs are handled.
1428 * @clear_mask: After the GMU handles an OOB interrupt, the GMU driver
1429 * clears the interrupt. clear_mask is a bitmask that defines
1430 * a set of OOB interrupts to clear.
1431 */
1432static int a6xx_oob_set(struct adreno_device *adreno_dev,
1433 unsigned int set_mask, unsigned int check_mask,
1434 unsigned int clear_mask)
1435{
1436 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001437 int ret = 0;
1438
1439 if (!kgsl_gmu_isenabled(device))
Kyle Pieferc75922e2017-05-18 15:05:07 -07001440 return 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001441
1442 kgsl_gmu_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, set_mask);
1443
1444 if (timed_poll_check(device,
1445 A6XX_GMU_GMU2HOST_INTR_INFO,
1446 check_mask,
1447 GPU_START_TIMEOUT,
1448 check_mask)) {
1449 ret = -ETIMEDOUT;
George Shen7201a6d2017-11-03 10:39:36 -07001450 WARN(1, "OOB set timed out, mask %x\n", set_mask);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001451 }
1452
1453 kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, clear_mask);
1454
1455 trace_kgsl_gmu_oob_set(set_mask);
1456 return ret;
1457}
1458
1459/*
1460 * a6xx_oob_clear() - Clear a previously set OOB request.
1461 * @adreno_dev: Pointer to the adreno device that has the GMU
1462 * @clear_mask: Bitmask that provides the OOB bits to clear
1463 */
1464static inline void a6xx_oob_clear(struct adreno_device *adreno_dev,
1465 unsigned int clear_mask)
1466{
1467 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1468
1469 if (!kgsl_gmu_isenabled(device))
1470 return;
1471
1472 kgsl_gmu_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, clear_mask);
1473 trace_kgsl_gmu_oob_clear(clear_mask);
1474}
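
/*
 * Typical OOB flow (editor's sketch): a caller raises the request with
 * a6xx_oob_set(), which also waits for the matching ACK bits, talks to the
 * GMU while the request is held, then drops it with a6xx_oob_clear(). The
 * masks below are the real boot/slumber ones used later in this file:
 *
 *	ret = a6xx_oob_set(adreno_dev, OOB_BOOT_SLUMBER_SET_MASK,
 *			OOB_BOOT_SLUMBER_CHECK_MASK,
 *			OOB_BOOT_SLUMBER_CLEAR_MASK);
 *	...
 *	a6xx_oob_clear(adreno_dev, OOB_BOOT_SLUMBER_CLEAR_MASK);
 */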
1475
Carter Cooperdf7ba702017-03-20 11:28:04 -06001476/*
1477 * a6xx_gpu_keepalive() - GMU reg write to request GPU stays on
1478 * @adreno_dev: Pointer to the adreno device that has the GMU
1479 * @state: State to set: true is ON, false is OFF
1480 */
1481static inline void a6xx_gpu_keepalive(struct adreno_device *adreno_dev,
1482 bool state)
1483{
1484 adreno_write_gmureg(adreno_dev,
1485 ADRENO_REG_GMU_PWR_COL_KEEPALIVE, state);
1486}
1487
Kyle Pieferb1027b02017-02-10 13:58:58 -08001488#define SPTPRAC_POWERON_CTRL_MASK 0x00778000
1489#define SPTPRAC_POWEROFF_CTRL_MASK 0x00778001
1490#define SPTPRAC_POWEROFF_STATUS_MASK BIT(2)
1491#define SPTPRAC_POWERON_STATUS_MASK BIT(3)
1492#define SPTPRAC_CTRL_TIMEOUT 10 /* ms */
Kyle Pieferfa50d3e2017-05-24 12:35:24 -07001493#define A6XX_RETAIN_FF_ENABLE_ENABLE_MASK BIT(11)
Kyle Pieferb1027b02017-02-10 13:58:58 -08001494
1495/*
1496 * a6xx_sptprac_enable() - Power on SPTPRAC
1497 * @adreno_dev: Pointer to Adreno device
1498 */
1499static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
1500{
1501 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1502 struct gmu_device *gmu = &device->gmu;
1503
Kyle Piefer51dc0142017-04-14 12:32:49 -07001504 if (!gmu->pdev)
Kyle Pieferb1027b02017-02-10 13:58:58 -08001505 return -EINVAL;
1506
1507 kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
1508 SPTPRAC_POWERON_CTRL_MASK);
1509
1510 if (timed_poll_check(device,
1511 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS,
1512 SPTPRAC_POWERON_STATUS_MASK,
1513 SPTPRAC_CTRL_TIMEOUT,
1514 SPTPRAC_POWERON_STATUS_MASK)) {
1515 dev_err(&gmu->pdev->dev, "power on SPTPRAC fail\n");
1516 return -EINVAL;
1517 }
1518
1519 return 0;
1520}
1521
1522/*
1523 * a6xx_sptprac_disable() - Power off SPTPRAC
1524 * @adreno_dev: Pointer to Adreno device
1525 */
1526static void a6xx_sptprac_disable(struct adreno_device *adreno_dev)
1527{
1528 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1529 struct gmu_device *gmu = &device->gmu;
1530
Kyle Piefer51dc0142017-04-14 12:32:49 -07001531 if (!gmu->pdev)
Kyle Pieferb1027b02017-02-10 13:58:58 -08001532 return;
1533
Kyle Pieferfa50d3e2017-05-24 12:35:24 -07001534 /* Ensure that retention is on */
1535 kgsl_gmu_regrmw(device, A6XX_GPU_CC_GX_GDSCR, 0,
1536 A6XX_RETAIN_FF_ENABLE_ENABLE_MASK);
1537
Kyle Pieferb1027b02017-02-10 13:58:58 -08001538 kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
1539 SPTPRAC_POWEROFF_CTRL_MASK);
1540
1541 if (timed_poll_check(device,
1542 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS,
1543 SPTPRAC_POWEROFF_STATUS_MASK,
1544 SPTPRAC_CTRL_TIMEOUT,
1545 SPTPRAC_POWEROFF_STATUS_MASK))
1546 dev_err(&gmu->pdev->dev, "power off SPTPRAC fail\n");
1547}
1548
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001549#define SPTPRAC_POWER_OFF BIT(2)
1550#define SP_CLK_OFF BIT(4)
1551#define GX_GDSC_POWER_OFF BIT(6)
1552#define GX_CLK_OFF BIT(7)
1553
1554/*
1555 * a6xx_gx_is_on() - Check if GX is on using pwr status register
1556 * @adreno_dev - Pointer to adreno_device
1557 * This check should only be performed if the keepalive bit is set or it
1558 * can be guaranteed that the power state of the GPU will remain unchanged
1559 */
1560static bool a6xx_gx_is_on(struct adreno_device *adreno_dev)
1561{
1562 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1563 unsigned int val;
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001564
1565 if (!kgsl_gmu_isenabled(device))
1566 return true;
1567
1568 kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
Kyle Pieferda0fa542017-08-04 13:39:40 -07001569 return !(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF));
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001570}
1571
1572/*
1573 * a6xx_sptprac_is_on() - Check if SPTP is on using pwr status register
1574 * @adreno_dev - Pointer to adreno_device
1575 * This check should only be performed if the keepalive bit is set or it
1576 * can be guaranteed that the power state of the GPU will remain unchanged
1577 */
1578static bool a6xx_sptprac_is_on(struct adreno_device *adreno_dev)
1579{
1580 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1581 unsigned int val;
1582
1583 if (!kgsl_gmu_isenabled(device))
1584 return true;
1585
1586 kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
1587 return !(val & (SPTPRAC_POWER_OFF | SP_CLK_OFF));
1588}
1589
Kyle Pieferb1027b02017-02-10 13:58:58 -08001590/*
1591 * a6xx_gfx_rail_on() - request GMU to power GPU at given OPP.
1592 * @device: Pointer to KGSL device
1593 *
1594 */
1595static int a6xx_gfx_rail_on(struct kgsl_device *device)
1596{
1597 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1598 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
1599 struct gmu_device *gmu = &device->gmu;
1600 struct arc_vote_desc *default_opp;
1601 unsigned int perf_idx;
1602 int ret;
1603
1604 perf_idx = pwr->num_pwrlevels - pwr->default_pwrlevel - 1;
1605 default_opp = &gmu->rpmh_votes.gx_votes[perf_idx];
1606
1607 kgsl_gmu_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
1608 OOB_BOOT_OPTION);
1609 kgsl_gmu_regwrite(device, A6XX_GMU_GX_VOTE_IDX, default_opp->pri_idx);
1610 kgsl_gmu_regwrite(device, A6XX_GMU_MX_VOTE_IDX, default_opp->sec_idx);
1611
1612 ret = a6xx_oob_set(adreno_dev, OOB_BOOT_SLUMBER_SET_MASK,
1613 OOB_BOOT_SLUMBER_CHECK_MASK,
1614 OOB_BOOT_SLUMBER_CLEAR_MASK);
1615
1616 if (ret)
Kyle Piefer247e35c2017-06-08 11:13:11 -07001617 dev_err(&gmu->pdev->dev, "Boot OOB timed out\n");
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001618
1619 return ret;
1620}
1621
Kyle Piefere923b7a2017-03-28 17:31:48 -07001622#define GMU_POWER_STATE_SLUMBER 15
1623
Kyle Pieferb1027b02017-02-10 13:58:58 -08001624/*
1625 * a6xx_notify_slumber() - initiate request to GMU to prepare to slumber
1626 * @device: Pointer to KGSL device
1627 */
1628static int a6xx_notify_slumber(struct kgsl_device *device)
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001629{
Kyle Pieferb1027b02017-02-10 13:58:58 -08001630 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1631 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
1632 struct gmu_device *gmu = &device->gmu;
1633 int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
1634 int perf_idx = gmu->num_gpupwrlevels - pwr->default_pwrlevel - 1;
1635 int ret, state;
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001636
Kyle Piefer247e35c2017-06-08 11:13:11 -07001637 /* Disable the power counter so that the GMU is not busy */
1638 kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
1639
Kyle Pieferf53c1872017-09-11 14:16:43 -07001640 /* Turn off SPTPRAC if we own it */
1641 if (gmu->idle_level < GPU_HW_SPTP_PC)
1642 a6xx_sptprac_disable(adreno_dev);
Kyle Piefer68178ef2017-06-19 16:46:13 -07001643
Kyle Pieferb1027b02017-02-10 13:58:58 -08001644 if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
1645 ret = hfi_notify_slumber(gmu, perf_idx, bus_level);
Kyle Pieferda0fa542017-08-04 13:39:40 -07001646 goto out;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001647 }
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001648
Kyle Pieferb1027b02017-02-10 13:58:58 -08001649 kgsl_gmu_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
1650 OOB_SLUMBER_OPTION);
Sharat Masetty928bc1d2017-11-13 15:46:55 +05301651 kgsl_gmu_regwrite(device, A6XX_GMU_GX_VOTE_IDX, perf_idx);
1652 kgsl_gmu_regwrite(device, A6XX_GMU_MX_VOTE_IDX, bus_level);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001653
1654 ret = a6xx_oob_set(adreno_dev, OOB_BOOT_SLUMBER_SET_MASK,
1655 OOB_BOOT_SLUMBER_CHECK_MASK,
1656 OOB_BOOT_SLUMBER_CLEAR_MASK);
1657 a6xx_oob_clear(adreno_dev, OOB_BOOT_SLUMBER_CLEAR_MASK);
1658
1659 if (ret)
Kyle Piefer247e35c2017-06-08 11:13:11 -07001660 dev_err(&gmu->pdev->dev, "Notify slumber OOB timed out\n");
Kyle Pieferb1027b02017-02-10 13:58:58 -08001661 else {
George Shenf2d4e052017-05-11 16:28:23 -07001662 kgsl_gmu_regread(device,
1663 A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &state);
Oleg Perelet62d5cec2017-03-27 16:14:52 -07001664 if (state != GPU_HW_SLUMBER) {
Kyle Pieferb1027b02017-02-10 13:58:58 -08001665 dev_err(&gmu->pdev->dev,
Kyle Pieferc96ad952017-05-02 13:35:45 -07001666 "Failed to prepare for slumber: 0x%x\n",
1667 state);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001668 ret = -EINVAL;
1669 }
1670 }
1671
Kyle Pieferda0fa542017-08-04 13:39:40 -07001672out:
1673 /* Make sure the fence is in ALLOW mode */
1674 kgsl_gmu_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001675 return ret;
1676}
1677
1678static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
1679{
1680 struct gmu_device *gmu = &device->gmu;
1681 struct device *dev = &gmu->pdev->dev;
George Shen6927d8f2017-07-19 11:38:10 -07001682 int val;
1683
1684 kgsl_gmu_regread(device, A6XX_GPU_CC_GX_DOMAIN_MISC, &val);
George Shen683841f2017-10-03 18:12:02 -07001685 if (!(val & 0x1))
1686 dev_err_ratelimited(&gmu->pdev->dev,
1687 "GMEM CLAMP IO not set while GFX rail off\n");
Kyle Pieferb1027b02017-02-10 13:58:58 -08001688
George Shencbb18e22017-05-11 16:04:13 -07001689 /* RSC wake sequence */
1690 kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
Kyle Pieferb1027b02017-02-10 13:58:58 -08001691
George Shencbb18e22017-05-11 16:04:13 -07001692 /* Write request before polling */
1693 wmb();
Kyle Pieferb1027b02017-02-10 13:58:58 -08001694
George Shencbb18e22017-05-11 16:04:13 -07001695 if (timed_poll_check(device,
1696 A6XX_GMU_RSCC_CONTROL_ACK,
1697 BIT(1),
1698 GPU_START_TIMEOUT,
1699 BIT(1))) {
1700 dev_err(dev, "Failed to do GPU RSC power on\n");
1701 return -EINVAL;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001702 }
1703
George Shencbb18e22017-05-11 16:04:13 -07001704 if (timed_poll_check(device,
1705 A6XX_RSCC_SEQ_BUSY_DRV0,
1706 0,
1707 GPU_START_TIMEOUT,
1708 0xFFFFFFFF))
1709 goto error_rsc;
1710
1711 kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
1712
Kyle Piefer247e35c2017-06-08 11:13:11 -07001713 /* Enable the power counter because it was disabled before slumber */
1714 kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
1715
Kyle Piefer68178ef2017-06-19 16:46:13 -07001716 return 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001717error_rsc:
1718 dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
Kyle Piefer68178ef2017-06-19 16:46:13 -07001719 return -EINVAL;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001720}
1721
1722static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
1723{
1724 struct gmu_device *gmu = &device->gmu;
Kyle Piefer3e1f6bc2017-08-10 11:16:19 -07001725 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1726 int ret;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001727
Kyle Piefer3e1f6bc2017-08-10 11:16:19 -07001728 /* RSC sleep sequence is different on v1 */
1729 if (adreno_is_a630v1(adreno_dev))
1730 kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
1731
Kyle Pieferb1027b02017-02-10 13:58:58 -08001732 kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 1);
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001733 wmb();
1734
Kyle Piefer3e1f6bc2017-08-10 11:16:19 -07001735 if (adreno_is_a630v1(adreno_dev))
1736 ret = timed_poll_check(device,
1737 A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0,
1738 BIT(0),
1739 GPU_START_TIMEOUT,
1740 BIT(0));
1741 else
1742 ret = timed_poll_check(device,
1743 A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
1744 BIT(16),
1745 GPU_START_TIMEOUT,
1746 BIT(16));
1747
1748 if (ret) {
Kyle Pieferb1027b02017-02-10 13:58:58 -08001749 dev_err(&gmu->pdev->dev, "GPU RSC power off fail\n");
Kyle Piefer3e1f6bc2017-08-10 11:16:19 -07001750 return -ETIMEDOUT;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001751 }
1752
Kyle Piefer3e1f6bc2017-08-10 11:16:19 -07001753 /* Read to clear the timestamp valid signal. Don't care what we read. */
1754 if (adreno_is_a630v1(adreno_dev)) {
1755 kgsl_gmu_regread(device,
1756 A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0,
1757 &ret);
1758 kgsl_gmu_regread(device,
1759 A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0,
1760 &ret);
1761 }
1762
Kyle Piefer9e0ac3c2017-05-01 16:34:14 -07001763 kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001764
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07001765 if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
Kyle Piefer3e1f6bc2017-08-10 11:16:19 -07001766 test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07001767 kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001768
Kyle Piefer68178ef2017-06-19 16:46:13 -07001769 return 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001770}
1771
1772/*
1773 * a6xx_gmu_fw_start() - set up GMU and start FW
1774 * @device: Pointer to KGSL device
1775 * @boot_state: State of the GMU being started
1776 */
1777static int a6xx_gmu_fw_start(struct kgsl_device *device,
1778 unsigned int boot_state)
1779{
1780 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1781 struct gmu_device *gmu = &device->gmu;
1782 struct gmu_memdesc *mem_addr = gmu->hfi_mem;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001783 int ret, i;
George Shenf453d422017-08-19 21:12:11 -07001784 unsigned int chipid = 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001785
Kyle Piefere923b7a2017-03-28 17:31:48 -07001786 switch (boot_state) {
Kyle Piefer7a714cd2017-06-21 15:55:47 -07001787 case GMU_RESET:
1788 /* fall through */
Kyle Piefere923b7a2017-03-28 17:31:48 -07001789 case GMU_COLD_BOOT:
Kyle Pieferb1027b02017-02-10 13:58:58 -08001790 /* Turn on TCM retention */
1791 kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);
1792
Kyle Piefer68178ef2017-06-19 16:46:13 -07001793 if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags))
Kyle Pieferb1027b02017-02-10 13:58:58 -08001794 _load_gmu_rpmh_ucode(device);
Kyle Piefer68178ef2017-06-19 16:46:13 -07001795 else if (boot_state != GMU_RESET) {
George Shencbb18e22017-05-11 16:04:13 -07001796 ret = a6xx_rpmh_power_on_gpu(device);
1797 if (ret)
1798 return ret;
1799 }
Kyle Pieferb1027b02017-02-10 13:58:58 -08001800
1801 if (gmu->load_mode == TCM_BOOT) {
1802 /* Load GMU image via AHB bus */
1803 for (i = 0; i < MAX_GMUFW_SIZE; i++)
1804 kgsl_gmu_regwrite(device,
1805 A6XX_GMU_CM3_ITCM_START + i,
1806 *((uint32_t *) gmu->fw_image.
1807 hostptr + i));
1808
1809 /* Prevent leaving reset before the FW is written */
1810 wmb();
1811 } else {
1812 dev_err(&gmu->pdev->dev, "Incorrect GMU load mode %d\n",
1813 gmu->load_mode);
1814 return -EINVAL;
1815 }
Kyle Piefere923b7a2017-03-28 17:31:48 -07001816 break;
1817 case GMU_WARM_BOOT:
Kyle Pieferb1027b02017-02-10 13:58:58 -08001818 ret = a6xx_rpmh_power_on_gpu(device);
1819 if (ret)
1820 return ret;
Kyle Piefere923b7a2017-03-28 17:31:48 -07001821 break;
Kyle Piefere923b7a2017-03-28 17:31:48 -07001822 default:
1823 break;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001824 }
1825
1826	/* Clear init result to make sure we are getting a fresh value */
1827 kgsl_gmu_regwrite(device, A6XX_GMU_CM3_FW_INIT_RESULT, 0);
1828 kgsl_gmu_regwrite(device, A6XX_GMU_CM3_BOOT_CONFIG, gmu->load_mode);
1829
1830 kgsl_gmu_regwrite(device, A6XX_GMU_HFI_QTBL_ADDR,
1831 mem_addr->gmuaddr);
1832 kgsl_gmu_regwrite(device, A6XX_GMU_HFI_QTBL_INFO, 1);
1833
1834 kgsl_gmu_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0,
1835 FENCE_RANGE_MASK);
1836
George Shenf453d422017-08-19 21:12:11 -07001837 /* Pass chipid to GMU FW, must happen before starting GMU */
1838
1839 /* Keep Core and Major bitfields unchanged */
1840 chipid = adreno_dev->chipid & 0xFFFF0000;
1841
1842 /*
1843 * Compress minor and patch version into 8 bits
1844 * Bit 15-12: minor version
1845 * Bit 11-8: patch version
1846 */
1847 chipid = chipid | (ADRENO_CHIPID_MINOR(adreno_dev->chipid) << 12)
1848 | (ADRENO_CHIPID_PATCH(adreno_dev->chipid) << 8);
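
	/*
	 * Worked example (editor's note, hypothetical values): if
	 * ADRENO_CHIPID_MINOR() returned 1 and ADRENO_CHIPID_PATCH()
	 * returned 2, the low half of chipid would become
	 * (1 << 12) | (2 << 8) = 0x1200, i.e. minor in bits 15:12 and
	 * patch in bits 11:8 as described above.
	 */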
1849
1850 kgsl_gmu_regwrite(device, A6XX_GMU_HFI_SFR_ADDR, chipid);
1851
Kyle Pieferd3964162017-04-06 15:44:03 -07001852 /* Configure power control and bring the GMU out of reset */
1853 a6xx_gmu_power_config(device);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001854 ret = a6xx_gmu_start(device);
1855 if (ret)
1856 return ret;
1857
Kyle Piefere923b7a2017-03-28 17:31:48 -07001858 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
Kyle Pieferb1027b02017-02-10 13:58:58 -08001859 ret = a6xx_gfx_rail_on(device);
1860 if (ret) {
1861 a6xx_oob_clear(adreno_dev,
1862 OOB_BOOT_SLUMBER_CLEAR_MASK);
1863 return ret;
1864 }
1865 }
1866
Kyle Piefer68178ef2017-06-19 16:46:13 -07001867 if (gmu->idle_level < GPU_HW_SPTP_PC) {
1868 ret = a6xx_sptprac_enable(adreno_dev);
1869 if (ret)
1870 return ret;
1871 }
1872
Kyle Pieferb1027b02017-02-10 13:58:58 -08001873 ret = a6xx_gmu_hfi_start(device);
1874 if (ret)
1875 return ret;
1876
1877 /* Make sure the write to start HFI happens before sending a message */
1878 wmb();
1879 return ret;
1880}
1881
1882/*
1883 * a6xx_gmu_dcvs_nohfi() - request GMU to do DCVS without using HFI
1884 * @device: Pointer to KGSL device
1885 * @perf_idx: Index into GPU performance level table defined in
1886 * HFI DCVS table message
1887 * @bw_idx: Index into GPU b/w table defined in HFI b/w table message
1888 *
1889 */
1890static int a6xx_gmu_dcvs_nohfi(struct kgsl_device *device,
1891 unsigned int perf_idx, unsigned int bw_idx)
1892{
1893 struct hfi_dcvs_cmd dcvs_cmd = {
Kyle Piefere923b7a2017-03-28 17:31:48 -07001894 .ack_type = ACK_NONBLOCK,
Kyle Pieferb1027b02017-02-10 13:58:58 -08001895 .freq = {
1896 .perf_idx = perf_idx,
1897 .clkset_opt = OPTION_AT_LEAST,
1898 },
1899 .bw = {
1900 .bw_idx = bw_idx,
1901 },
1902 };
1903 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1904 struct gmu_device *gmu = &device->gmu;
1905 union gpu_perf_vote vote;
1906 int ret;
1907
Kyle Pieferb1027b02017-02-10 13:58:58 -08001908 kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_ACK_OPTION, dcvs_cmd.ack_type);
1909
1910 vote.fvote = dcvs_cmd.freq;
1911 kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_PERF_SETTING, vote.raw);
1912
1913 vote.bvote = dcvs_cmd.bw;
1914 kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_BW_SETTING, vote.raw);
1915
1916 ret = a6xx_oob_set(adreno_dev, OOB_DCVS_SET_MASK, OOB_DCVS_CHECK_MASK,
1917 OOB_DCVS_CLEAR_MASK);
1918
1919 if (ret) {
Kyle Piefer247e35c2017-06-08 11:13:11 -07001920 dev_err(&gmu->pdev->dev, "DCVS OOB timed out\n");
Kyle Pieferb1027b02017-02-10 13:58:58 -08001921 goto done;
1922 }
1923
1924 kgsl_gmu_regread(device, A6XX_GMU_DCVS_RETURN, &ret);
1925 if (ret)
1926 dev_err(&gmu->pdev->dev, "OOB DCVS error %d\n", ret);
1927
1928done:
1929 a6xx_oob_clear(adreno_dev, OOB_DCVS_CLEAR_MASK);
1930
1931 return ret;
1932}
1933
Oleg Perelet62d5cec2017-03-27 16:14:52 -07001934static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
1935{
1936 unsigned int reg;
1937
1938 kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
1939 A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg);
George Shencbb18e22017-05-11 16:04:13 -07001940 if (reg & GPUBUSYIGNAHB)
1941 return false;
1942 return true;
Oleg Perelet62d5cec2017-03-27 16:14:52 -07001943}
1944
Kyle Piefer4033f562017-08-16 10:00:48 -07001945static int a6xx_wait_for_lowest_idle(struct adreno_device *adreno_dev)
1946{
1947 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1948 struct gmu_device *gmu = &device->gmu;
1949 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1950 unsigned int reg;
1951 unsigned long t;
1952
1953 if (!kgsl_gmu_isenabled(device))
1954 return 0;
1955
1956 t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
1957 while (!time_after(jiffies, t)) {
1958 adreno_read_gmureg(ADRENO_DEVICE(device),
1959 ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
1960
1961 /* SPTPRAC PC has the same idle level as IFPC */
1962 if ((reg == gmu->idle_level) ||
1963 (gmu->idle_level == GPU_HW_SPTP_PC &&
1964 reg == GPU_HW_IFPC)) {
1965 /* IFPC is not complete until GX is off */
1966 if (gmu->idle_level != GPU_HW_IFPC ||
1967 !gpudev->gx_is_on(adreno_dev))
1968 return 0;
1969 }
1970
1971		/* Wait 10us to 100us to reduce unnecessary AHB bus traffic */
Oleg Perelet7f7f9f52017-10-31 10:02:45 -07001972 usleep_range(10, 100);
Kyle Piefer4033f562017-08-16 10:00:48 -07001973 }
1974
1975 /* Check one last time */
1976 adreno_read_gmureg(ADRENO_DEVICE(device),
1977 ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
1978 if ((reg == gmu->idle_level) ||
1979 (gmu->idle_level == GPU_HW_SPTP_PC &&
1980 reg == GPU_HW_IFPC)) {
1981 if (gmu->idle_level != GPU_HW_IFPC ||
1982 !gpudev->gx_is_on(adreno_dev))
1983 return 0;
1984 }
1985
George Shen7201a6d2017-11-03 10:39:36 -07001986 WARN(1, "Timeout waiting for lowest idle level: %d\n", reg);
Kyle Piefer4033f562017-08-16 10:00:48 -07001987 return -ETIMEDOUT;
1988}
1989
Oleg Perelet62d5cec2017-03-27 16:14:52 -07001990static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
Kyle Pieferb1027b02017-02-10 13:58:58 -08001991{
1992 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1993 struct gmu_device *gmu = &device->gmu;
Kyle Piefer247e35c2017-06-08 11:13:11 -07001994 unsigned int status, status2;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001995
Oleg Perelet62d5cec2017-03-27 16:14:52 -07001996 if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
Kyle Piefer5c9478c2017-04-20 15:12:05 -07001997 0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
Kyle Piefer247e35c2017-06-08 11:13:11 -07001998 kgsl_gmu_regread(device,
1999 A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &status);
2000 kgsl_gmu_regread(device,
2001 A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2, &status2);
2002 dev_err(&gmu->pdev->dev,
2003 "GMU not idling: status=0x%x, status2=0x%x\n",
2004 status, status2);
Oleg Perelet62d5cec2017-03-27 16:14:52 -07002005 return -ETIMEDOUT;
2006 }
Kyle Pieferb1027b02017-02-10 13:58:58 -08002007
Oleg Perelet62d5cec2017-03-27 16:14:52 -07002008 return 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08002009}
2010
2011/*
2012 * _load_gmu_firmware() - Request the GMU firmware and stage it in GMU-shared memory
2013 * @device: Pointer to KGSL device
2014 */
2015static int _load_gmu_firmware(struct kgsl_device *device)
2016{
2017 const struct firmware *fw = NULL;
2018 const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2019 struct gmu_device *gmu = &device->gmu;
2020 const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
2021 int image_size, ret = -EINVAL;
2022
2023 /* there is no GMU */
2024 if (!kgsl_gmu_isenabled(device))
2025 return 0;
2026
2027 /* GMU fw already saved and verified so do nothing new */
2028 if (gmu->fw_image.hostptr != 0)
2029 return 0;
2030
2031 if (gpucore->gpmufw_name == NULL)
2032 return -EINVAL;
2033
2034 ret = request_firmware(&fw, gpucore->gpmufw_name, device->dev);
2035 if (ret || fw == NULL) {
2036 KGSL_CORE_ERR("request_firmware (%s) failed: %d\n",
2037 gpucore->gpmufw_name, ret);
2038 return ret;
2039 }
2040
2041 image_size = PAGE_ALIGN(fw->size);
2042
2043 ret = allocate_gmu_image(gmu, image_size);
2044
2045 /* load into shared memory with GMU */
2046 if (!ret)
2047 memcpy(gmu->fw_image.hostptr, fw->data, fw->size);
2048
2049 release_firmware(fw);
2050
2051 return ret;
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002052}
2053
2054/*
2055 * a6xx_microcode_read() - Read microcode
2056 * @adreno_dev: Pointer to adreno device
2057 */
2058static int a6xx_microcode_read(struct adreno_device *adreno_dev)
2059{
Lynus Vaz573e5012017-06-20 20:37:50 +05302060 int ret;
2061 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2062 struct adreno_firmware *sqe_fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
2063
2064 if (sqe_fw->memdesc.hostptr == NULL) {
2065 ret = _load_firmware(device, adreno_dev->gpucore->sqefw_name,
2066 sqe_fw);
2067 if (ret)
2068 return ret;
2069 }
2070
2071 return _load_gmu_firmware(device);
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002072}
2073
Rajesh Kemisettid1ca9542017-10-18 15:35:41 +05302074#define GBIF_CX_HALT_MASK BIT(1)
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002075
2076static int a6xx_soft_reset(struct adreno_device *adreno_dev)
2077{
2078 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2079 unsigned int reg;
Shrenuj Bansal13cae372017-06-07 13:34:35 -07002080 unsigned long time;
2081 bool vbif_acked = false;
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002082
2083 /*
2084 * For the soft reset case with GMU enabled this part is done
2085 * by the GMU firmware
2086 */
Shrenuj Bansald0fe7462017-05-08 16:11:19 -07002087 if (kgsl_gmu_isenabled(device) &&
2088 !test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv))
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002089 return 0;
2090
2091
2092 adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 1);
2093 /*
2094 * Do a dummy read to get a brief read cycle delay for the
2095 * reset to take effect
2096 */
2097 adreno_readreg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, &reg);
2098 adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 0);
2099
Shrenuj Bansal13cae372017-06-07 13:34:35 -07002100 /* Wait for the VBIF reset ack to complete */
2101 time = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
2102
2103 do {
2104 kgsl_regread(device, A6XX_RBBM_VBIF_GX_RESET_STATUS, &reg);
2105 if ((reg & VBIF_RESET_ACK_MASK) == VBIF_RESET_ACK_MASK) {
2106 vbif_acked = true;
2107 break;
2108 }
2109 cpu_relax();
2110 } while (!time_after(jiffies, time));
2111
2112 if (!vbif_acked)
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002113 return -ETIMEDOUT;
2114
Rajesh Kemisettid1ca9542017-10-18 15:35:41 +05302115 /*
2116 * GBIF GX halt will be released automatically by sw_reset.
2117 * Release GBIF CX halt after sw_reset
2118 */
2119 if (adreno_has_gbif(adreno_dev))
2120 kgsl_regrmw(device, A6XX_GBIF_HALT, GBIF_CX_HALT_MASK, 0);
2121
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002122 a6xx_sptprac_enable(adreno_dev);
2123
2124 return 0;
2125}
2126
Kyle Piefere923b7a2017-03-28 17:31:48 -07002127#define A6XX_STATE_OF_CHILD (BIT(4) | BIT(5))
2128#define A6XX_IDLE_FULL_LLM BIT(0)
2129#define A6XX_WAKEUP_ACK BIT(1)
2130#define A6XX_IDLE_FULL_ACK BIT(0)
2131#define A6XX_VBIF_XIN_HALT_CTRL1_ACKS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
2132
2133static void a6xx_isense_disable(struct kgsl_device *device)
2134{
2135 unsigned int val;
2136 const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2137
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07002138 if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
2139 !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
Kyle Piefere923b7a2017-03-28 17:31:48 -07002140 return;
2141
2142 kgsl_gmu_regread(device, A6XX_GPU_CS_ENABLE_REG, &val);
2143 if (val) {
2144 kgsl_gmu_regwrite(device, A6XX_GPU_CS_ENABLE_REG, 0);
2145 kgsl_gmu_regwrite(device, A6XX_GMU_ISENSE_CTRL, 0);
2146 }
2147}
2148
2149static int a6xx_llm_glm_handshake(struct kgsl_device *device)
2150{
2151 unsigned int val;
2152 const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2153 struct gmu_device *gmu = &device->gmu;
2154
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07002155 if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
2156 !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
Kyle Piefere923b7a2017-03-28 17:31:48 -07002157 return 0;
2158
2159 kgsl_gmu_regread(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, &val);
2160 if (!(val & A6XX_STATE_OF_CHILD)) {
2161 kgsl_gmu_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0, BIT(4));
2162 kgsl_gmu_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0,
2163 A6XX_IDLE_FULL_LLM);
2164 if (timed_poll_check(device, A6XX_GMU_LLM_GLM_SLEEP_STATUS,
2165 A6XX_IDLE_FULL_ACK, GPU_RESET_TIMEOUT,
2166 A6XX_IDLE_FULL_ACK)) {
2167 dev_err(&gmu->pdev->dev, "LLM-GLM handshake failed\n");
2168 return -EINVAL;
2169 }
2170 }
2171
2172 return 0;
2173}
2174
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07002175
2176static void a6xx_count_throttles(struct adreno_device *adreno_dev,
2177 uint64_t adj)
2178{
2179 if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
2180 !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
2181 return;
2182
2183 kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
2184 adreno_dev->lm_threshold_count,
2185 &adreno_dev->lm_threshold_cross);
2186}
2187
Kyle Piefere923b7a2017-03-28 17:31:48 -07002188static int a6xx_complete_rpmh_votes(struct kgsl_device *device)
2189{
2190 int ret = 0;
2191
2192 if (!kgsl_gmu_isenabled(device))
2193 return ret;
2194
2195 ret |= timed_poll_check(device, A6XX_RSCC_TCS0_DRV0_STATUS, BIT(0),
2196 GPU_RESET_TIMEOUT, BIT(0));
2197 ret |= timed_poll_check(device, A6XX_RSCC_TCS1_DRV0_STATUS, BIT(0),
2198 GPU_RESET_TIMEOUT, BIT(0));
2199 ret |= timed_poll_check(device, A6XX_RSCC_TCS2_DRV0_STATUS, BIT(0),
2200 GPU_RESET_TIMEOUT, BIT(0));
2201 ret |= timed_poll_check(device, A6XX_RSCC_TCS3_DRV0_STATUS, BIT(0),
2202 GPU_RESET_TIMEOUT, BIT(0));
2203
2204 return ret;
2205}
2206
2207static int a6xx_gmu_suspend(struct kgsl_device *device)
2208{
2209 /* Max GX clients on A6xx is 2: GMU and KMD */
2210 int ret = 0, max_client_num = 2;
2211 struct gmu_device *gmu = &device->gmu;
2212 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2213
2214 /* do it only if LM feature is enabled */
2215 /* Disable ISENSE if it's on */
2216 a6xx_isense_disable(device);
2217
2218 /* LLM-GLM handshake sequence */
2219 a6xx_llm_glm_handshake(device);
2220
2221 /* If SPTP_RAC is on, turn off SPTP_RAC HS */
2222 a6xx_sptprac_disable(adreno_dev);
2223
George Shenf135a972017-08-24 16:59:42 -07002224 /* Disconnect GPU from BUS is not needed if CX GDSC goes off later */
Kyle Piefere923b7a2017-03-28 17:31:48 -07002225
2226 /* Check no outstanding RPMh voting */
2227 a6xx_complete_rpmh_votes(device);
2228
Kyle Piefer68178ef2017-06-19 16:46:13 -07002229 if (gmu->gx_gdsc) {
Kyle Piefere923b7a2017-03-28 17:31:48 -07002230 if (regulator_is_enabled(gmu->gx_gdsc)) {
2231			/* Switch gx gdsc control from GMU to CPU:
2232			 * force a non-zero reference count in the clk driver
2233			 * so that the next disable call will turn
2234			 * off the GDSC
2235 */
2236 ret = regulator_enable(gmu->gx_gdsc);
2237 if (ret)
2238 dev_err(&gmu->pdev->dev,
2239 "suspend fail: gx enable\n");
2240
2241 while ((max_client_num)) {
2242 ret = regulator_disable(gmu->gx_gdsc);
2243 if (!regulator_is_enabled(gmu->gx_gdsc))
2244 break;
2245 max_client_num -= 1;
2246 }
2247
2248 if (!max_client_num)
2249 dev_err(&gmu->pdev->dev,
2250 "suspend fail: cannot disable gx\n");
2251 }
2252 }
2253
2254 return ret;
2255}
2256
2257/*
2258 * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
2259 * @adreno_dev: Pointer to adreno device
2260 * @mode: requested power mode
2261 * @arg1: first argument for mode control
2262 * @arg2: second argument for mode control
2263 */
2264static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
2265 unsigned int mode, unsigned int arg1, unsigned int arg2)
2266{
2267 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2268 struct gmu_device *gmu = &device->gmu;
2269 int ret;
2270
2271 switch (mode) {
2272 case GMU_FW_START:
2273 ret = a6xx_gmu_fw_start(device, arg1);
2274 break;
2275 case GMU_SUSPEND:
2276 ret = a6xx_gmu_suspend(device);
2277 break;
2278 case GMU_FW_STOP:
2279 ret = a6xx_rpmh_power_off_gpu(device);
2280 break;
2281 case GMU_DCVS_NOHFI:
2282 ret = a6xx_gmu_dcvs_nohfi(device, arg1, arg2);
2283 break;
2284 case GMU_NOTIFY_SLUMBER:
2285 ret = a6xx_notify_slumber(device);
2286 break;
2287 default:
2288 dev_err(&gmu->pdev->dev,
2289 "unsupported GMU power ctrl mode:%d\n", mode);
2290 ret = -EINVAL;
2291 break;
2292 }
2293
2294 return ret;
2295}
2296
Shrenuj Bansald0fe7462017-05-08 16:11:19 -07002297/**
2298 * a6xx_reset() - Helper function to reset the GPU
2299 * @device: Pointer to the KGSL device structure for the GPU
2300 * @fault: Type of fault. Needed to skip soft reset for MMU fault
2301 *
2302 * Try to reset the GPU to recover from a fault. First, try to do a low latency
2303 * soft reset. If the soft reset fails for some reason, then bring out the big
2304 * guns and toggle the footswitch.
2305 */
2306static int a6xx_reset(struct kgsl_device *device, int fault)
2307{
2308 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2309 int ret = -EINVAL;
2310 int i = 0;
2311
2312 /* Use the regular reset sequence for No GMU */
2313 if (!kgsl_gmu_isenabled(device))
2314 return adreno_reset(device, fault);
2315
2316 /* Transition from ACTIVE to RESET state */
2317 kgsl_pwrctrl_change_state(device, KGSL_STATE_RESET);
2318
2319 /* Try soft reset first */
2320 if (!(fault & ADRENO_IOMMU_PAGE_FAULT)) {
2321 int acked;
2322
2323 /* NMI */
2324 kgsl_gmu_regwrite(device, A6XX_GMU_NMI_CONTROL_STATUS, 0);
2325 kgsl_gmu_regwrite(device, A6XX_GMU_CM3_CFG, (1 << 9));
2326
2327 for (i = 0; i < 10; i++) {
2328 kgsl_gmu_regread(device,
2329 A6XX_GMU_NMI_CONTROL_STATUS, &acked);
2330
2331			/* NMI FW ACK received */
2332 if (acked == 0x1)
2333 break;
2334
2335 udelay(100);
2336 }
2337
Rajesh Kemisettid1ca9542017-10-18 15:35:41 +05302338 if (acked) {
2339 /* Make sure VBIF/GBIF is cleared before resetting */
2340 ret = adreno_vbif_clear_pending_transactions(device);
2341
2342 if (ret == 0)
2343 ret = adreno_soft_reset(device);
2344 }
2345
Shrenuj Bansald0fe7462017-05-08 16:11:19 -07002346 if (ret)
2347 KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
2348 }
2349 if (ret) {
2350 /* If soft reset failed/skipped, then pull the power */
2351 set_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv);
2352 /* since device is officially off now clear start bit */
2353 clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
2354
2355 /* Keep trying to start the device until it works */
2356 for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) {
2357 ret = adreno_start(device, 0);
2358 if (!ret)
2359 break;
2360
2361 msleep(20);
2362 }
2363 }
2364
2365 clear_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv);
2366
2367 if (ret)
2368 return ret;
2369
2370 if (i != 0)
2371 KGSL_DRV_WARN(device, "Device hard reset tried %d tries\n", i);
2372
2373 /*
2374 * If active_cnt is non-zero then the system was active before
2375 * going into a reset - put it back in that state
2376 */
2377
2378 if (atomic_read(&device->active_cnt))
2379 kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
2380 else
2381 kgsl_pwrctrl_change_state(device, KGSL_STATE_NAP);
2382
2383 return ret;
2384}
2385
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002386static void a6xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit)
2387{
2388 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2389 unsigned int status1, status2;
2390
2391 kgsl_regread(device, A6XX_CP_INTERRUPT_STATUS, &status1);
2392
Shrenuj Bansala602c022017-03-08 10:40:34 -08002393 if (status1 & BIT(A6XX_CP_OPCODE_ERROR)) {
2394 unsigned int opcode;
2395
2396 kgsl_regwrite(device, A6XX_CP_SQE_STAT_ADDR, 1);
2397 kgsl_regread(device, A6XX_CP_SQE_STAT_DATA, &opcode);
2398 KGSL_DRV_CRIT_RATELIMIT(device,
Kyle Piefer2ce06162017-03-15 11:29:08 -07002399 "CP opcode error interrupt | opcode=0x%8.8x\n",
2400 opcode);
Shrenuj Bansala602c022017-03-08 10:40:34 -08002401 }
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002402 if (status1 & BIT(A6XX_CP_UCODE_ERROR))
2403 KGSL_DRV_CRIT_RATELIMIT(device, "CP ucode error interrupt\n");
2404 if (status1 & BIT(A6XX_CP_HW_FAULT_ERROR)) {
2405 kgsl_regread(device, A6XX_CP_HW_FAULT, &status2);
2406 KGSL_DRV_CRIT_RATELIMIT(device,
2407 "CP | Ringbuffer HW fault | status=%x\n",
2408 status2);
2409 }
2410 if (status1 & BIT(A6XX_CP_REGISTER_PROTECTION_ERROR)) {
2411 kgsl_regread(device, A6XX_CP_PROTECT_STATUS, &status2);
2412 KGSL_DRV_CRIT_RATELIMIT(device,
2413 "CP | Protected mode error | %s | addr=%x | status=%x\n",
2414 status2 & (1 << 20) ? "READ" : "WRITE",
Lynus Vazdc807342017-02-20 18:23:25 +05302415 status2 & 0x3FFFF, status2);
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002416 }
2417 if (status1 & BIT(A6XX_CP_AHB_ERROR))
2418 KGSL_DRV_CRIT_RATELIMIT(device,
2419 "CP AHB error interrupt\n");
2420 if (status1 & BIT(A6XX_CP_VSD_PARITY_ERROR))
2421 KGSL_DRV_CRIT_RATELIMIT(device,
2422 "CP VSD decoder parity error\n");
2423 if (status1 & BIT(A6XX_CP_ILLEGAL_INSTR_ERROR))
2424 KGSL_DRV_CRIT_RATELIMIT(device,
2425 "CP Illegal instruction error\n");
2426
2427}
2428
2429static void a6xx_err_callback(struct adreno_device *adreno_dev, int bit)
2430{
2431 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2432
2433 switch (bit) {
2434 case A6XX_INT_CP_AHB_ERROR:
2435 KGSL_DRV_CRIT_RATELIMIT(device, "CP: AHB bus error\n");
2436 break;
2437 case A6XX_INT_ATB_ASYNCFIFO_OVERFLOW:
2438 KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB ASYNC overflow\n");
2439 break;
2440 case A6XX_INT_RBBM_ATB_BUS_OVERFLOW:
2441 KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB bus overflow\n");
2442 break;
2443 case A6XX_INT_UCHE_OOB_ACCESS:
2444 KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Out of bounds access\n");
2445 break;
2446 case A6XX_INT_UCHE_TRAP_INTR:
2447 KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Trap interrupt\n");
2448 break;
2449 default:
2450 KGSL_DRV_CRIT_RATELIMIT(device, "Unknown interrupt %d\n", bit);
2451 }
2452}
2453
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06002454/* GPU System Cache control registers */
2455#define A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0 0x4
2456#define A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1 0x8
2457
2458static inline void _reg_rmw(void __iomem *regaddr,
2459 unsigned int mask, unsigned int bits)
2460{
2461 unsigned int val = 0;
2462
2463 val = __raw_readl(regaddr);
2464 /* Make sure the above read completes before we proceed */
2465 rmb();
2466 val &= ~mask;
2467 __raw_writel(val | bits, regaddr);
2468 /* Make sure the above write posts before we proceed*/
2469 wmb();
2470}
2471
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06002472/*
2473 * a6xx_llc_configure_gpu_scid() - Program the sub-cache ID for all GPU blocks
2474 * @adreno_dev: The adreno device pointer
2475 */
2476static void a6xx_llc_configure_gpu_scid(struct adreno_device *adreno_dev)
2477{
2478 uint32_t gpu_scid;
2479 uint32_t gpu_cntl1_val = 0;
2480 int i;
2481 void __iomem *gpu_cx_reg;
2482
2483 gpu_scid = adreno_llc_get_scid(adreno_dev->gpu_llc_slice);
2484 for (i = 0; i < A6XX_LLC_NUM_GPU_SCIDS; i++)
2485 gpu_cntl1_val = (gpu_cntl1_val << A6XX_GPU_LLC_SCID_NUM_BITS)
2486 | gpu_scid;
2487
2488 gpu_cx_reg = ioremap(A6XX_GPU_CX_REG_BASE, A6XX_GPU_CX_REG_SIZE);
2489 _reg_rmw(gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
2490 A6XX_GPU_LLC_SCID_MASK, gpu_cntl1_val);
2491 iounmap(gpu_cx_reg);
2492}
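
/*
 * Worked example (editor's note, hypothetical SCID): if
 * adreno_llc_get_scid() returned 2, the loop above replicates the 5-bit ID
 * into five adjacent fields:
 *
 *	0b00010 00010 00010 00010 00010 = 0x210842
 *
 * which is then applied under A6XX_GPU_LLC_SCID_MASK so that all five GPU
 * sub-blocks use the same system cache slice ID.
 */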
2493
2494/*
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07002495 * a6xx_llc_configure_gpuhtw_scid() - Program the SCID for GPU pagetables
2496 * @adreno_dev: The adreno device pointer
2497 */
2498static void a6xx_llc_configure_gpuhtw_scid(struct adreno_device *adreno_dev)
2499{
2500 uint32_t gpuhtw_scid;
2501 void __iomem *gpu_cx_reg;
2502
2503 gpuhtw_scid = adreno_llc_get_scid(adreno_dev->gpuhtw_llc_slice);
2504
2505 gpu_cx_reg = ioremap(A6XX_GPU_CX_REG_BASE, A6XX_GPU_CX_REG_SIZE);
Kyle Piefer11a48b62017-03-17 14:53:40 -07002506 _reg_rmw(gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07002507 A6XX_GPUHTW_LLC_SCID_MASK,
2508 gpuhtw_scid << A6XX_GPUHTW_LLC_SCID_SHIFT);
2509 iounmap(gpu_cx_reg);
2510}
2511
2512/*
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06002513 * a6xx_llc_enable_overrides() - Override the page attributes
2514 * @adreno_dev: The adreno device pointer
2515 */
2516static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev)
2517{
2518 void __iomem *gpu_cx_reg;
2519
2520 /*
2521 * 0x3: readnoallocoverrideen=0
2522 * read-no-alloc=0 - Allocate lines on read miss
2523 * writenoallocoverrideen=1
2524	 * write-no-alloc=1 - Do not allocate lines on write miss
2525 */
2526 gpu_cx_reg = ioremap(A6XX_GPU_CX_REG_BASE, A6XX_GPU_CX_REG_SIZE);
2527 __raw_writel(0x3, gpu_cx_reg + A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0);
2528 /* Make sure the above write posts before we proceed*/
2529 wmb();
2530 iounmap(gpu_cx_reg);
2531}
2532
Lynus Vaz1fde74d2017-03-20 18:02:47 +05302533static const char *fault_block[8] = {
2534 [0] = "CP",
2535 [1] = "UCHE",
2536 [2] = "VFD",
2537 [3] = "UCHE",
2538 [4] = "CCU",
2539 [5] = "unknown",
2540 [6] = "CDP Prefetch",
2541 [7] = "GPMU",
2542};
2543
2544static const char *uche_client[8] = {
2545 [0] = "VFD",
2546 [1] = "SP",
2547 [2] = "VSC",
2548 [3] = "VPC",
2549 [4] = "HLSQ",
2550 [5] = "PC",
2551 [6] = "LRZ",
2552 [7] = "unknown",
2553};
2554
2555static const char *a6xx_iommu_fault_block(struct adreno_device *adreno_dev,
2556 unsigned int fsynr1)
2557{
2558 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2559 unsigned int client_id;
2560 unsigned int uche_client_id;
2561
2562 client_id = fsynr1 & 0xff;
2563
2564 if (client_id >= ARRAY_SIZE(fault_block))
2565 return "unknown";
2566 else if (client_id != 3)
2567 return fault_block[client_id];
2568
Harshdeep Dhatt3f074a92017-05-01 12:59:01 -06002569 mutex_lock(&device->mutex);
Lynus Vaz1fde74d2017-03-20 18:02:47 +05302570 kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
Harshdeep Dhatt3f074a92017-05-01 12:59:01 -06002571 mutex_unlock(&device->mutex);
2572
Lynus Vaz1fde74d2017-03-20 18:02:47 +05302573 return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
2574}
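
/*
 * Example decode (editor's note): fsynr1 & 0xff selects the client. A
 * value of 2 maps straight to "VFD" via fault_block[], while a value of 3
 * (UCHE) requires reading A6XX_UCHE_CLIENT_PF to work out which UCHE
 * client (VFD, SP, VSC, ...) actually faulted.
 */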
2575
Harshdeep Dhattd388e522017-07-06 14:30:06 -06002576static void a6xx_cp_callback(struct adreno_device *adreno_dev, int bit)
2577{
2578 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2579
Harshdeep Dhatt7ee8a862017-11-20 17:51:54 -07002580 if (adreno_is_preemption_enabled(adreno_dev))
Harshdeep Dhatt12a642c2017-08-17 12:19:26 -06002581 a6xx_preemption_trigger(adreno_dev);
2582
Harshdeep Dhattd388e522017-07-06 14:30:06 -06002583 adreno_dispatcher_schedule(device);
2584}
2585
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002586#define A6XX_INT_MASK \
Kyle Pieferb1027b02017-02-10 13:58:58 -08002587 ((1 << A6XX_INT_CP_AHB_ERROR) | \
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002588 (1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) | \
Kyle Pieferb1027b02017-02-10 13:58:58 -08002589 (1 << A6XX_INT_RBBM_GPC_ERROR) | \
2590 (1 << A6XX_INT_CP_SW) | \
2591 (1 << A6XX_INT_CP_HW_ERROR) | \
2592 (1 << A6XX_INT_CP_IB2) | \
2593 (1 << A6XX_INT_CP_IB1) | \
2594 (1 << A6XX_INT_CP_RB) | \
2595 (1 << A6XX_INT_CP_CACHE_FLUSH_TS) | \
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002596 (1 << A6XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
Kyle Pieferb1027b02017-02-10 13:58:58 -08002597 (1 << A6XX_INT_RBBM_HANG_DETECT) | \
2598 (1 << A6XX_INT_UCHE_OOB_ACCESS) | \
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002599 (1 << A6XX_INT_UCHE_TRAP_INTR))
2600
2601static struct adreno_irq_funcs a6xx_irq_funcs[32] = {
2602 ADRENO_IRQ_CALLBACK(NULL), /* 0 - RBBM_GPU_IDLE */
2603 ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 1 - RBBM_AHB_ERROR */
2604 ADRENO_IRQ_CALLBACK(NULL), /* 2 - UNUSED */
2605 ADRENO_IRQ_CALLBACK(NULL), /* 3 - UNUSED */
2606 ADRENO_IRQ_CALLBACK(NULL), /* 4 - UNUSED */
2607 ADRENO_IRQ_CALLBACK(NULL), /* 5 - UNUSED */
2608 /* 6 - RBBM_ATB_ASYNC_OVERFLOW */
2609 ADRENO_IRQ_CALLBACK(a6xx_err_callback),
2610 ADRENO_IRQ_CALLBACK(NULL), /* 7 - GPC_ERR */
Harshdeep Dhatt0cdc8992017-05-31 15:44:05 -06002611 ADRENO_IRQ_CALLBACK(a6xx_preemption_callback),/* 8 - CP_SW */
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002612 ADRENO_IRQ_CALLBACK(a6xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */
2613 ADRENO_IRQ_CALLBACK(NULL), /* 10 - CP_CCU_FLUSH_DEPTH_TS */
2614 ADRENO_IRQ_CALLBACK(NULL), /* 11 - CP_CCU_FLUSH_COLOR_TS */
2615 ADRENO_IRQ_CALLBACK(NULL), /* 12 - CP_CCU_RESOLVE_TS */
2616 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 13 - CP_IB2_INT */
2617 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 14 - CP_IB1_INT */
2618 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 15 - CP_RB_INT */
2619 ADRENO_IRQ_CALLBACK(NULL), /* 16 - UNUSED */
2620 ADRENO_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
2621 ADRENO_IRQ_CALLBACK(NULL), /* 18 - CP_WT_DONE_TS */
2622 ADRENO_IRQ_CALLBACK(NULL), /* 19 - UNUSED */
Harshdeep Dhattd388e522017-07-06 14:30:06 -06002623 ADRENO_IRQ_CALLBACK(a6xx_cp_callback), /* 20 - CP_CACHE_FLUSH_TS */
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002624 ADRENO_IRQ_CALLBACK(NULL), /* 21 - UNUSED */
2625 ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 22 - RBBM_ATB_BUS_OVERFLOW */
2626 /* 23 - MISC_HANG_DETECT */
2627 ADRENO_IRQ_CALLBACK(adreno_hang_int_callback),
2628 ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 24 - UCHE_OOB_ACCESS */
2629 ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 25 - UCHE_TRAP_INTR */
2630 ADRENO_IRQ_CALLBACK(NULL), /* 26 - DEBBUS_INTR_0 */
2631 ADRENO_IRQ_CALLBACK(NULL), /* 27 - DEBBUS_INTR_1 */
2632 ADRENO_IRQ_CALLBACK(NULL), /* 28 - UNUSED */
2633 ADRENO_IRQ_CALLBACK(NULL), /* 29 - UNUSED */
2634 ADRENO_IRQ_CALLBACK(NULL), /* 30 - ISDB_CPU_IRQ */
2635 ADRENO_IRQ_CALLBACK(NULL), /* 31 - ISDB_UNDER_DEBUG */
2636};
2637
2638static struct adreno_irq a6xx_irq = {
2639 .funcs = a6xx_irq_funcs,
2640 .mask = A6XX_INT_MASK,
2641};
2642
Shrenuj Bansal41665402016-12-16 15:25:54 -08002643static struct adreno_snapshot_sizes a6xx_snap_sizes = {
2644 .cp_pfp = 0x33,
2645 .roq = 0x400,
2646};
2647
2648static struct adreno_snapshot_data a6xx_snapshot_data = {
2649 .sect_sizes = &a6xx_snap_sizes,
2650};
2651
Lokesh Batraa8300e02017-05-25 11:17:40 -07002652static struct adreno_coresight_register a6xx_coresight_regs[] = {
2653 { A6XX_DBGC_CFG_DBGBUS_SEL_A },
2654 { A6XX_DBGC_CFG_DBGBUS_SEL_B },
2655 { A6XX_DBGC_CFG_DBGBUS_SEL_C },
2656 { A6XX_DBGC_CFG_DBGBUS_SEL_D },
2657 { A6XX_DBGC_CFG_DBGBUS_CNTLT },
2658 { A6XX_DBGC_CFG_DBGBUS_CNTLM },
2659 { A6XX_DBGC_CFG_DBGBUS_OPL },
2660 { A6XX_DBGC_CFG_DBGBUS_OPE },
2661 { A6XX_DBGC_CFG_DBGBUS_IVTL_0 },
2662 { A6XX_DBGC_CFG_DBGBUS_IVTL_1 },
2663 { A6XX_DBGC_CFG_DBGBUS_IVTL_2 },
2664 { A6XX_DBGC_CFG_DBGBUS_IVTL_3 },
2665 { A6XX_DBGC_CFG_DBGBUS_MASKL_0 },
2666 { A6XX_DBGC_CFG_DBGBUS_MASKL_1 },
2667 { A6XX_DBGC_CFG_DBGBUS_MASKL_2 },
2668 { A6XX_DBGC_CFG_DBGBUS_MASKL_3 },
2669 { A6XX_DBGC_CFG_DBGBUS_BYTEL_0 },
2670 { A6XX_DBGC_CFG_DBGBUS_BYTEL_1 },
2671 { A6XX_DBGC_CFG_DBGBUS_IVTE_0 },
2672 { A6XX_DBGC_CFG_DBGBUS_IVTE_1 },
2673 { A6XX_DBGC_CFG_DBGBUS_IVTE_2 },
2674 { A6XX_DBGC_CFG_DBGBUS_IVTE_3 },
2675 { A6XX_DBGC_CFG_DBGBUS_MASKE_0 },
2676 { A6XX_DBGC_CFG_DBGBUS_MASKE_1 },
2677 { A6XX_DBGC_CFG_DBGBUS_MASKE_2 },
2678 { A6XX_DBGC_CFG_DBGBUS_MASKE_3 },
2679 { A6XX_DBGC_CFG_DBGBUS_NIBBLEE },
2680 { A6XX_DBGC_CFG_DBGBUS_PTRC0 },
2681 { A6XX_DBGC_CFG_DBGBUS_PTRC1 },
2682 { A6XX_DBGC_CFG_DBGBUS_LOADREG },
2683 { A6XX_DBGC_CFG_DBGBUS_IDX },
2684 { A6XX_DBGC_CFG_DBGBUS_CLRC },
2685 { A6XX_DBGC_CFG_DBGBUS_LOADIVT },
2686 { A6XX_DBGC_VBIF_DBG_CNTL },
2687 { A6XX_DBGC_DBG_LO_HI_GPIO },
2688 { A6XX_DBGC_EXT_TRACE_BUS_CNTL },
2689 { A6XX_DBGC_READ_AHB_THROUGH_DBG },
2690 { A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 },
2691 { A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 },
2692 { A6XX_DBGC_EVT_CFG },
2693 { A6XX_DBGC_EVT_INTF_SEL_0 },
2694 { A6XX_DBGC_EVT_INTF_SEL_1 },
2695 { A6XX_DBGC_PERF_ATB_CFG },
2696 { A6XX_DBGC_PERF_ATB_COUNTER_SEL_0 },
2697 { A6XX_DBGC_PERF_ATB_COUNTER_SEL_1 },
2698 { A6XX_DBGC_PERF_ATB_COUNTER_SEL_2 },
2699 { A6XX_DBGC_PERF_ATB_COUNTER_SEL_3 },
2700 { A6XX_DBGC_PERF_ATB_TRIG_INTF_SEL_0 },
2701 { A6XX_DBGC_PERF_ATB_TRIG_INTF_SEL_1 },
2702 { A6XX_DBGC_PERF_ATB_DRAIN_CMD },
2703 { A6XX_DBGC_ECO_CNTL },
2704 { A6XX_DBGC_AHB_DBG_CNTL },
2705};
2706
2707static struct adreno_coresight_register a6xx_coresight_regs_cx[] = {
2708 { A6XX_CX_DBGC_CFG_DBGBUS_SEL_A },
2709 { A6XX_CX_DBGC_CFG_DBGBUS_SEL_B },
2710 { A6XX_CX_DBGC_CFG_DBGBUS_SEL_C },
2711 { A6XX_CX_DBGC_CFG_DBGBUS_SEL_D },
2712 { A6XX_CX_DBGC_CFG_DBGBUS_CNTLT },
2713 { A6XX_CX_DBGC_CFG_DBGBUS_CNTLM },
2714 { A6XX_CX_DBGC_CFG_DBGBUS_OPL },
2715 { A6XX_CX_DBGC_CFG_DBGBUS_OPE },
2716 { A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 },
2717 { A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 },
2718 { A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 },
2719 { A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 },
2720 { A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 },
2721 { A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 },
2722 { A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 },
2723 { A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 },
2724 { A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 },
2725 { A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 },
2726 { A6XX_CX_DBGC_CFG_DBGBUS_IVTE_0 },
2727 { A6XX_CX_DBGC_CFG_DBGBUS_IVTE_1 },
2728 { A6XX_CX_DBGC_CFG_DBGBUS_IVTE_2 },
2729 { A6XX_CX_DBGC_CFG_DBGBUS_IVTE_3 },
2730 { A6XX_CX_DBGC_CFG_DBGBUS_MASKE_0 },
2731 { A6XX_CX_DBGC_CFG_DBGBUS_MASKE_1 },
2732 { A6XX_CX_DBGC_CFG_DBGBUS_MASKE_2 },
2733 { A6XX_CX_DBGC_CFG_DBGBUS_MASKE_3 },
2734 { A6XX_CX_DBGC_CFG_DBGBUS_NIBBLEE },
2735 { A6XX_CX_DBGC_CFG_DBGBUS_PTRC0 },
2736 { A6XX_CX_DBGC_CFG_DBGBUS_PTRC1 },
2737 { A6XX_CX_DBGC_CFG_DBGBUS_LOADREG },
2738 { A6XX_CX_DBGC_CFG_DBGBUS_IDX },
2739 { A6XX_CX_DBGC_CFG_DBGBUS_CLRC },
2740 { A6XX_CX_DBGC_CFG_DBGBUS_LOADIVT },
2741 { A6XX_CX_DBGC_VBIF_DBG_CNTL },
2742 { A6XX_CX_DBGC_DBG_LO_HI_GPIO },
2743 { A6XX_CX_DBGC_EXT_TRACE_BUS_CNTL },
2744 { A6XX_CX_DBGC_READ_AHB_THROUGH_DBG },
2745 { A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 },
2746 { A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 },
2747 { A6XX_CX_DBGC_EVT_CFG },
2748 { A6XX_CX_DBGC_EVT_INTF_SEL_0 },
2749 { A6XX_CX_DBGC_EVT_INTF_SEL_1 },
2750 { A6XX_CX_DBGC_PERF_ATB_CFG },
2751 { A6XX_CX_DBGC_PERF_ATB_COUNTER_SEL_0 },
2752 { A6XX_CX_DBGC_PERF_ATB_COUNTER_SEL_1 },
2753 { A6XX_CX_DBGC_PERF_ATB_COUNTER_SEL_2 },
2754 { A6XX_CX_DBGC_PERF_ATB_COUNTER_SEL_3 },
2755 { A6XX_CX_DBGC_PERF_ATB_TRIG_INTF_SEL_0 },
2756 { A6XX_CX_DBGC_PERF_ATB_TRIG_INTF_SEL_1 },
2757 { A6XX_CX_DBGC_PERF_ATB_DRAIN_CMD },
2758 { A6XX_CX_DBGC_ECO_CNTL },
2759 { A6XX_CX_DBGC_AHB_DBG_CNTL },
2760};
2761
2762static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_a, &a6xx_coresight_regs[0]);
2763static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_b, &a6xx_coresight_regs[1]);
2764static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_c, &a6xx_coresight_regs[2]);
2765static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_d, &a6xx_coresight_regs[3]);
2766static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_cntlt, &a6xx_coresight_regs[4]);
2767static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_cntlm, &a6xx_coresight_regs[5]);
2768static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_opl, &a6xx_coresight_regs[6]);
2769static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ope, &a6xx_coresight_regs[7]);
2770static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_0, &a6xx_coresight_regs[8]);
2771static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_1, &a6xx_coresight_regs[9]);
2772static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_2, &a6xx_coresight_regs[10]);
2773static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_3, &a6xx_coresight_regs[11]);
2774static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_0, &a6xx_coresight_regs[12]);
2775static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_1, &a6xx_coresight_regs[13]);
2776static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_2, &a6xx_coresight_regs[14]);
2777static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_3, &a6xx_coresight_regs[15]);
2778static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_bytel_0, &a6xx_coresight_regs[16]);
2779static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_bytel_1, &a6xx_coresight_regs[17]);
2780static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_0, &a6xx_coresight_regs[18]);
2781static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_1, &a6xx_coresight_regs[19]);
2782static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_2, &a6xx_coresight_regs[20]);
2783static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_3, &a6xx_coresight_regs[21]);
2784static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_0, &a6xx_coresight_regs[22]);
2785static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_1, &a6xx_coresight_regs[23]);
2786static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_2, &a6xx_coresight_regs[24]);
2787static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_3, &a6xx_coresight_regs[25]);
2788static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_nibblee, &a6xx_coresight_regs[26]);
2789static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ptrc0, &a6xx_coresight_regs[27]);
2790static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ptrc1, &a6xx_coresight_regs[28]);
2791static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_loadreg, &a6xx_coresight_regs[29]);
2792static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_idx, &a6xx_coresight_regs[30]);
2793static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_clrc, &a6xx_coresight_regs[31]);
2794static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_loadivt, &a6xx_coresight_regs[32]);
2795static ADRENO_CORESIGHT_ATTR(vbif_dbg_cntl, &a6xx_coresight_regs[33]);
2796static ADRENO_CORESIGHT_ATTR(dbg_lo_hi_gpio, &a6xx_coresight_regs[34]);
2797static ADRENO_CORESIGHT_ATTR(ext_trace_bus_cntl, &a6xx_coresight_regs[35]);
2798static ADRENO_CORESIGHT_ATTR(read_ahb_through_dbg, &a6xx_coresight_regs[36]);
2799static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_trace_buf1, &a6xx_coresight_regs[37]);
2800static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_trace_buf2, &a6xx_coresight_regs[38]);
2801static ADRENO_CORESIGHT_ATTR(evt_cfg, &a6xx_coresight_regs[39]);
2802static ADRENO_CORESIGHT_ATTR(evt_intf_sel_0, &a6xx_coresight_regs[40]);
2803static ADRENO_CORESIGHT_ATTR(evt_intf_sel_1, &a6xx_coresight_regs[41]);
2804static ADRENO_CORESIGHT_ATTR(perf_atb_cfg, &a6xx_coresight_regs[42]);
2805static ADRENO_CORESIGHT_ATTR(perf_atb_counter_sel_0, &a6xx_coresight_regs[43]);
2806static ADRENO_CORESIGHT_ATTR(perf_atb_counter_sel_1, &a6xx_coresight_regs[44]);
2807static ADRENO_CORESIGHT_ATTR(perf_atb_counter_sel_2, &a6xx_coresight_regs[45]);
2808static ADRENO_CORESIGHT_ATTR(perf_atb_counter_sel_3, &a6xx_coresight_regs[46]);
2809static ADRENO_CORESIGHT_ATTR(perf_atb_trig_intf_sel_0,
2810 &a6xx_coresight_regs[47]);
2811static ADRENO_CORESIGHT_ATTR(perf_atb_trig_intf_sel_1,
2812 &a6xx_coresight_regs[48]);
2813static ADRENO_CORESIGHT_ATTR(perf_atb_drain_cmd, &a6xx_coresight_regs[49]);
2814static ADRENO_CORESIGHT_ATTR(eco_cntl, &a6xx_coresight_regs[50]);
2815static ADRENO_CORESIGHT_ATTR(ahb_dbg_cntl, &a6xx_coresight_regs[51]);
2816
2817/* CX debug registers */
2818static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_sel_a,
2819 &a6xx_coresight_regs_cx[0]);
2820static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_sel_b,
2821 &a6xx_coresight_regs_cx[1]);
2822static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_sel_c,
2823 &a6xx_coresight_regs_cx[2]);
2824static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_sel_d,
2825 &a6xx_coresight_regs_cx[3]);
2826static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_cntlt,
2827 &a6xx_coresight_regs_cx[4]);
2828static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_cntlm,
2829 &a6xx_coresight_regs_cx[5]);
2830static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_opl,
2831 &a6xx_coresight_regs_cx[6]);
2832static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ope,
2833 &a6xx_coresight_regs_cx[7]);
2834static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivtl_0,
2835 &a6xx_coresight_regs_cx[8]);
2836static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivtl_1,
2837 &a6xx_coresight_regs_cx[9]);
2838static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivtl_2,
2839 &a6xx_coresight_regs_cx[10]);
2840static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivtl_3,
2841 &a6xx_coresight_regs_cx[11]);
2842static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maskl_0,
2843 &a6xx_coresight_regs_cx[12]);
2844static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maskl_1,
2845 &a6xx_coresight_regs_cx[13]);
2846static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maskl_2,
2847 &a6xx_coresight_regs_cx[14]);
2848static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maskl_3,
2849 &a6xx_coresight_regs_cx[15]);
2850static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_bytel_0,
2851 &a6xx_coresight_regs_cx[16]);
2852static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_bytel_1,
2853 &a6xx_coresight_regs_cx[17]);
2854static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivte_0,
2855 &a6xx_coresight_regs_cx[18]);
2856static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivte_1,
2857 &a6xx_coresight_regs_cx[19]);
2858static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivte_2,
2859 &a6xx_coresight_regs_cx[20]);
2860static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivte_3,
2861 &a6xx_coresight_regs_cx[21]);
2862static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maske_0,
2863 &a6xx_coresight_regs_cx[22]);
2864static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maske_1,
2865 &a6xx_coresight_regs_cx[23]);
2866static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maske_2,
2867 &a6xx_coresight_regs_cx[24]);
2868static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maske_3,
2869 &a6xx_coresight_regs_cx[25]);
2870static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_nibblee,
2871 &a6xx_coresight_regs_cx[26]);
2872static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ptrc0,
2873 &a6xx_coresight_regs_cx[27]);
2874static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ptrc1,
2875 &a6xx_coresight_regs_cx[28]);
2876static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_loadreg,
2877 &a6xx_coresight_regs_cx[29]);
2878static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_idx,
2879 &a6xx_coresight_regs_cx[30]);
2880static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_clrc,
2881 &a6xx_coresight_regs_cx[31]);
2882static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_loadivt,
2883 &a6xx_coresight_regs_cx[32]);
2884static ADRENO_CORESIGHT_ATTR(cx_vbif_dbg_cntl,
2885 &a6xx_coresight_regs_cx[33]);
2886static ADRENO_CORESIGHT_ATTR(cx_dbg_lo_hi_gpio,
2887 &a6xx_coresight_regs_cx[34]);
2888static ADRENO_CORESIGHT_ATTR(cx_ext_trace_bus_cntl,
2889 &a6xx_coresight_regs_cx[35]);
2890static ADRENO_CORESIGHT_ATTR(cx_read_ahb_through_dbg,
2891 &a6xx_coresight_regs_cx[36]);
2892static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_trace_buf1,
2893 &a6xx_coresight_regs_cx[37]);
2894static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_trace_buf2,
2895 &a6xx_coresight_regs_cx[38]);
2896static ADRENO_CORESIGHT_ATTR(cx_evt_cfg,
2897 &a6xx_coresight_regs_cx[39]);
2898static ADRENO_CORESIGHT_ATTR(cx_evt_intf_sel_0,
2899 &a6xx_coresight_regs_cx[40]);
2900static ADRENO_CORESIGHT_ATTR(cx_evt_intf_sel_1,
2901 &a6xx_coresight_regs_cx[41]);
2902static ADRENO_CORESIGHT_ATTR(cx_perf_atb_cfg,
2903 &a6xx_coresight_regs_cx[42]);
2904static ADRENO_CORESIGHT_ATTR(cx_perf_atb_counter_sel_0,
2905 &a6xx_coresight_regs_cx[43]);
2906static ADRENO_CORESIGHT_ATTR(cx_perf_atb_counter_sel_1,
2907 &a6xx_coresight_regs_cx[44]);
2908static ADRENO_CORESIGHT_ATTR(cx_perf_atb_counter_sel_2,
2909 &a6xx_coresight_regs_cx[45]);
2910static ADRENO_CORESIGHT_ATTR(cx_perf_atb_counter_sel_3,
2911 &a6xx_coresight_regs_cx[46]);
2912static ADRENO_CORESIGHT_ATTR(cx_perf_atb_trig_intf_sel_0,
2913 &a6xx_coresight_regs_cx[47]);
2914static ADRENO_CORESIGHT_ATTR(cx_perf_atb_trig_intf_sel_1,
2915 &a6xx_coresight_regs_cx[48]);
2916static ADRENO_CORESIGHT_ATTR(cx_perf_atb_drain_cmd,
2917 &a6xx_coresight_regs_cx[49]);
2918static ADRENO_CORESIGHT_ATTR(cx_eco_cntl,
2919 &a6xx_coresight_regs_cx[50]);
2920static ADRENO_CORESIGHT_ATTR(cx_ahb_dbg_cntl,
2921 &a6xx_coresight_regs_cx[51]);
2922
2923static struct attribute *a6xx_coresight_attrs[] = {
2924 &coresight_attr_cfg_dbgbus_sel_a.attr.attr,
2925 &coresight_attr_cfg_dbgbus_sel_b.attr.attr,
2926 &coresight_attr_cfg_dbgbus_sel_c.attr.attr,
2927 &coresight_attr_cfg_dbgbus_sel_d.attr.attr,
2928 &coresight_attr_cfg_dbgbus_cntlt.attr.attr,
2929 &coresight_attr_cfg_dbgbus_cntlm.attr.attr,
2930 &coresight_attr_cfg_dbgbus_opl.attr.attr,
2931 &coresight_attr_cfg_dbgbus_ope.attr.attr,
2932 &coresight_attr_cfg_dbgbus_ivtl_0.attr.attr,
2933 &coresight_attr_cfg_dbgbus_ivtl_1.attr.attr,
2934 &coresight_attr_cfg_dbgbus_ivtl_2.attr.attr,
2935 &coresight_attr_cfg_dbgbus_ivtl_3.attr.attr,
2936 &coresight_attr_cfg_dbgbus_maskl_0.attr.attr,
2937 &coresight_attr_cfg_dbgbus_maskl_1.attr.attr,
2938 &coresight_attr_cfg_dbgbus_maskl_2.attr.attr,
2939 &coresight_attr_cfg_dbgbus_maskl_3.attr.attr,
2940 &coresight_attr_cfg_dbgbus_bytel_0.attr.attr,
2941 &coresight_attr_cfg_dbgbus_bytel_1.attr.attr,
2942 &coresight_attr_cfg_dbgbus_ivte_0.attr.attr,
2943 &coresight_attr_cfg_dbgbus_ivte_1.attr.attr,
2944 &coresight_attr_cfg_dbgbus_ivte_2.attr.attr,
2945 &coresight_attr_cfg_dbgbus_ivte_3.attr.attr,
2946 &coresight_attr_cfg_dbgbus_maske_0.attr.attr,
2947 &coresight_attr_cfg_dbgbus_maske_1.attr.attr,
2948 &coresight_attr_cfg_dbgbus_maske_2.attr.attr,
2949 &coresight_attr_cfg_dbgbus_maske_3.attr.attr,
2950 &coresight_attr_cfg_dbgbus_nibblee.attr.attr,
2951 &coresight_attr_cfg_dbgbus_ptrc0.attr.attr,
2952 &coresight_attr_cfg_dbgbus_ptrc1.attr.attr,
2953 &coresight_attr_cfg_dbgbus_loadreg.attr.attr,
2954 &coresight_attr_cfg_dbgbus_idx.attr.attr,
2955 &coresight_attr_cfg_dbgbus_clrc.attr.attr,
2956 &coresight_attr_cfg_dbgbus_loadivt.attr.attr,
2957 &coresight_attr_vbif_dbg_cntl.attr.attr,
2958 &coresight_attr_dbg_lo_hi_gpio.attr.attr,
2959 &coresight_attr_ext_trace_bus_cntl.attr.attr,
2960 &coresight_attr_read_ahb_through_dbg.attr.attr,
2961 &coresight_attr_cfg_dbgbus_trace_buf1.attr.attr,
2962 &coresight_attr_cfg_dbgbus_trace_buf2.attr.attr,
2963 &coresight_attr_evt_cfg.attr.attr,
2964 &coresight_attr_evt_intf_sel_0.attr.attr,
2965 &coresight_attr_evt_intf_sel_1.attr.attr,
2966 &coresight_attr_perf_atb_cfg.attr.attr,
2967 &coresight_attr_perf_atb_counter_sel_0.attr.attr,
2968 &coresight_attr_perf_atb_counter_sel_1.attr.attr,
2969 &coresight_attr_perf_atb_counter_sel_2.attr.attr,
2970 &coresight_attr_perf_atb_counter_sel_3.attr.attr,
2971 &coresight_attr_perf_atb_trig_intf_sel_0.attr.attr,
2972 &coresight_attr_perf_atb_trig_intf_sel_1.attr.attr,
2973 &coresight_attr_perf_atb_drain_cmd.attr.attr,
2974 &coresight_attr_eco_cntl.attr.attr,
2975 &coresight_attr_ahb_dbg_cntl.attr.attr,
2976 NULL,
2977};
2978
2979/* CX coresight attributes */
2980static struct attribute *a6xx_coresight_attrs_cx[] = {
2981 &coresight_attr_cx_cfg_dbgbus_sel_a.attr.attr,
2982 &coresight_attr_cx_cfg_dbgbus_sel_b.attr.attr,
2983 &coresight_attr_cx_cfg_dbgbus_sel_c.attr.attr,
2984 &coresight_attr_cx_cfg_dbgbus_sel_d.attr.attr,
2985 &coresight_attr_cx_cfg_dbgbus_cntlt.attr.attr,
2986 &coresight_attr_cx_cfg_dbgbus_cntlm.attr.attr,
2987 &coresight_attr_cx_cfg_dbgbus_opl.attr.attr,
2988 &coresight_attr_cx_cfg_dbgbus_ope.attr.attr,
2989 &coresight_attr_cx_cfg_dbgbus_ivtl_0.attr.attr,
2990 &coresight_attr_cx_cfg_dbgbus_ivtl_1.attr.attr,
2991 &coresight_attr_cx_cfg_dbgbus_ivtl_2.attr.attr,
2992 &coresight_attr_cx_cfg_dbgbus_ivtl_3.attr.attr,
2993 &coresight_attr_cx_cfg_dbgbus_maskl_0.attr.attr,
2994 &coresight_attr_cx_cfg_dbgbus_maskl_1.attr.attr,
2995 &coresight_attr_cx_cfg_dbgbus_maskl_2.attr.attr,
2996 &coresight_attr_cx_cfg_dbgbus_maskl_3.attr.attr,
2997 &coresight_attr_cx_cfg_dbgbus_bytel_0.attr.attr,
2998 &coresight_attr_cx_cfg_dbgbus_bytel_1.attr.attr,
2999 &coresight_attr_cx_cfg_dbgbus_ivte_0.attr.attr,
3000 &coresight_attr_cx_cfg_dbgbus_ivte_1.attr.attr,
3001 &coresight_attr_cx_cfg_dbgbus_ivte_2.attr.attr,
3002 &coresight_attr_cx_cfg_dbgbus_ivte_3.attr.attr,
3003 &coresight_attr_cx_cfg_dbgbus_maske_0.attr.attr,
3004 &coresight_attr_cx_cfg_dbgbus_maske_1.attr.attr,
3005 &coresight_attr_cx_cfg_dbgbus_maske_2.attr.attr,
3006 &coresight_attr_cx_cfg_dbgbus_maske_3.attr.attr,
3007 &coresight_attr_cx_cfg_dbgbus_nibblee.attr.attr,
3008 &coresight_attr_cx_cfg_dbgbus_ptrc0.attr.attr,
3009 &coresight_attr_cx_cfg_dbgbus_ptrc1.attr.attr,
3010 &coresight_attr_cx_cfg_dbgbus_loadreg.attr.attr,
3011 &coresight_attr_cx_cfg_dbgbus_idx.attr.attr,
3012 &coresight_attr_cx_cfg_dbgbus_clrc.attr.attr,
3013 &coresight_attr_cx_cfg_dbgbus_loadivt.attr.attr,
3014 &coresight_attr_cx_vbif_dbg_cntl.attr.attr,
3015 &coresight_attr_cx_dbg_lo_hi_gpio.attr.attr,
3016 &coresight_attr_cx_ext_trace_bus_cntl.attr.attr,
3017 &coresight_attr_cx_read_ahb_through_dbg.attr.attr,
3018 &coresight_attr_cx_cfg_dbgbus_trace_buf1.attr.attr,
3019 &coresight_attr_cx_cfg_dbgbus_trace_buf2.attr.attr,
3020 &coresight_attr_cx_evt_cfg.attr.attr,
3021 &coresight_attr_cx_evt_intf_sel_0.attr.attr,
3022 &coresight_attr_cx_evt_intf_sel_1.attr.attr,
3023 &coresight_attr_cx_perf_atb_cfg.attr.attr,
3024 &coresight_attr_cx_perf_atb_counter_sel_0.attr.attr,
3025 &coresight_attr_cx_perf_atb_counter_sel_1.attr.attr,
3026 &coresight_attr_cx_perf_atb_counter_sel_2.attr.attr,
3027 &coresight_attr_cx_perf_atb_counter_sel_3.attr.attr,
3028 &coresight_attr_cx_perf_atb_trig_intf_sel_0.attr.attr,
3029 &coresight_attr_cx_perf_atb_trig_intf_sel_1.attr.attr,
3030 &coresight_attr_cx_perf_atb_drain_cmd.attr.attr,
3031 &coresight_attr_cx_eco_cntl.attr.attr,
3032 &coresight_attr_cx_ahb_dbg_cntl.attr.attr,
3033 NULL,
3034};
3035
3036static const struct attribute_group a6xx_coresight_group = {
3037 .attrs = a6xx_coresight_attrs,
3038};
3039
3040static const struct attribute_group *a6xx_coresight_groups[] = {
3041 &a6xx_coresight_group,
3042 NULL,
3043};
3044
3045static const struct attribute_group a6xx_coresight_group_cx = {
3046 .attrs = a6xx_coresight_attrs_cx,
3047};
3048
3049static const struct attribute_group *a6xx_coresight_groups_cx[] = {
3050 &a6xx_coresight_group_cx,
3051 NULL,
3052};
3053
3054static struct adreno_coresight a6xx_coresight = {
3055 .registers = a6xx_coresight_regs,
3056 .count = ARRAY_SIZE(a6xx_coresight_regs),
3057 .groups = a6xx_coresight_groups,
3058};
3059
3060static struct adreno_coresight a6xx_coresight_cx = {
3061 .registers = a6xx_coresight_regs_cx,
3062 .count = ARRAY_SIZE(a6xx_coresight_regs_cx),
3063 .groups = a6xx_coresight_groups_cx,
3064};
3065
Lynus Vaz107d2892017-03-01 13:48:06 +05303066static struct adreno_perfcount_register a6xx_perfcounters_cp[] = {
3067 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_0_LO,
3068 A6XX_RBBM_PERFCTR_CP_0_HI, 0, A6XX_CP_PERFCTR_CP_SEL_0 },
3069 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_1_LO,
3070 A6XX_RBBM_PERFCTR_CP_1_HI, 1, A6XX_CP_PERFCTR_CP_SEL_1 },
3071 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_2_LO,
3072 A6XX_RBBM_PERFCTR_CP_2_HI, 2, A6XX_CP_PERFCTR_CP_SEL_2 },
3073 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_3_LO,
3074 A6XX_RBBM_PERFCTR_CP_3_HI, 3, A6XX_CP_PERFCTR_CP_SEL_3 },
3075 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_4_LO,
3076 A6XX_RBBM_PERFCTR_CP_4_HI, 4, A6XX_CP_PERFCTR_CP_SEL_4 },
3077 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_5_LO,
3078 A6XX_RBBM_PERFCTR_CP_5_HI, 5, A6XX_CP_PERFCTR_CP_SEL_5 },
3079 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_6_LO,
3080 A6XX_RBBM_PERFCTR_CP_6_HI, 6, A6XX_CP_PERFCTR_CP_SEL_6 },
3081 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_7_LO,
3082 A6XX_RBBM_PERFCTR_CP_7_HI, 7, A6XX_CP_PERFCTR_CP_SEL_7 },
3083 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_8_LO,
3084 A6XX_RBBM_PERFCTR_CP_8_HI, 8, A6XX_CP_PERFCTR_CP_SEL_8 },
3085 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_9_LO,
3086 A6XX_RBBM_PERFCTR_CP_9_HI, 9, A6XX_CP_PERFCTR_CP_SEL_9 },
3087 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_10_LO,
3088 A6XX_RBBM_PERFCTR_CP_10_HI, 10, A6XX_CP_PERFCTR_CP_SEL_10 },
3089 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_11_LO,
3090 A6XX_RBBM_PERFCTR_CP_11_HI, 11, A6XX_CP_PERFCTR_CP_SEL_11 },
3091 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_12_LO,
3092 A6XX_RBBM_PERFCTR_CP_12_HI, 12, A6XX_CP_PERFCTR_CP_SEL_12 },
3093 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_13_LO,
3094 A6XX_RBBM_PERFCTR_CP_13_HI, 13, A6XX_CP_PERFCTR_CP_SEL_13 },
3095};
3096
3097static struct adreno_perfcount_register a6xx_perfcounters_rbbm[] = {
3098 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_0_LO,
3099 A6XX_RBBM_PERFCTR_RBBM_0_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_0 },
3100 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_1_LO,
3101 A6XX_RBBM_PERFCTR_RBBM_1_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_1 },
3102 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_2_LO,
3103 A6XX_RBBM_PERFCTR_RBBM_2_HI, 16, A6XX_RBBM_PERFCTR_RBBM_SEL_2 },
3104 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_3_LO,
3105 A6XX_RBBM_PERFCTR_RBBM_3_HI, 17, A6XX_RBBM_PERFCTR_RBBM_SEL_3 },
3106};
3107
3108static struct adreno_perfcount_register a6xx_perfcounters_pc[] = {
3109 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_0_LO,
3110 A6XX_RBBM_PERFCTR_PC_0_HI, 18, A6XX_PC_PERFCTR_PC_SEL_0 },
3111 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_1_LO,
3112 A6XX_RBBM_PERFCTR_PC_1_HI, 19, A6XX_PC_PERFCTR_PC_SEL_1 },
3113 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_2_LO,
3114 A6XX_RBBM_PERFCTR_PC_2_HI, 20, A6XX_PC_PERFCTR_PC_SEL_2 },
3115 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_3_LO,
3116 A6XX_RBBM_PERFCTR_PC_3_HI, 21, A6XX_PC_PERFCTR_PC_SEL_3 },
3117 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_4_LO,
3118 A6XX_RBBM_PERFCTR_PC_4_HI, 22, A6XX_PC_PERFCTR_PC_SEL_4 },
3119 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_5_LO,
3120 A6XX_RBBM_PERFCTR_PC_5_HI, 23, A6XX_PC_PERFCTR_PC_SEL_5 },
3121 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_6_LO,
3122 A6XX_RBBM_PERFCTR_PC_6_HI, 24, A6XX_PC_PERFCTR_PC_SEL_6 },
3123 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_7_LO,
3124 A6XX_RBBM_PERFCTR_PC_7_HI, 25, A6XX_PC_PERFCTR_PC_SEL_7 },
3125};
3126
3127static struct adreno_perfcount_register a6xx_perfcounters_vfd[] = {
3128 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_0_LO,
3129 A6XX_RBBM_PERFCTR_VFD_0_HI, 26, A6XX_VFD_PERFCTR_VFD_SEL_0 },
3130 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_1_LO,
3131 A6XX_RBBM_PERFCTR_VFD_1_HI, 27, A6XX_VFD_PERFCTR_VFD_SEL_1 },
3132 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_2_LO,
3133 A6XX_RBBM_PERFCTR_VFD_2_HI, 28, A6XX_VFD_PERFCTR_VFD_SEL_2 },
3134 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_3_LO,
3135 A6XX_RBBM_PERFCTR_VFD_3_HI, 29, A6XX_VFD_PERFCTR_VFD_SEL_3 },
3136 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_4_LO,
3137 A6XX_RBBM_PERFCTR_VFD_4_HI, 30, A6XX_VFD_PERFCTR_VFD_SEL_4 },
3138 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_5_LO,
3139 A6XX_RBBM_PERFCTR_VFD_5_HI, 31, A6XX_VFD_PERFCTR_VFD_SEL_5 },
3140 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_6_LO,
3141 A6XX_RBBM_PERFCTR_VFD_6_HI, 32, A6XX_VFD_PERFCTR_VFD_SEL_6 },
3142 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_7_LO,
3143 A6XX_RBBM_PERFCTR_VFD_7_HI, 33, A6XX_VFD_PERFCTR_VFD_SEL_7 },
3144};
3145
3146static struct adreno_perfcount_register a6xx_perfcounters_hlsq[] = {
3147 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_0_LO,
3148 A6XX_RBBM_PERFCTR_HLSQ_0_HI, 34, A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
3149 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_1_LO,
3150 A6XX_RBBM_PERFCTR_HLSQ_1_HI, 35, A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
3151 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_2_LO,
3152 A6XX_RBBM_PERFCTR_HLSQ_2_HI, 36, A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
3153 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_3_LO,
3154 A6XX_RBBM_PERFCTR_HLSQ_3_HI, 37, A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
3155 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_4_LO,
3156 A6XX_RBBM_PERFCTR_HLSQ_4_HI, 38, A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
3157 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_5_LO,
3158 A6XX_RBBM_PERFCTR_HLSQ_5_HI, 39, A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
3159};
3160
3161static struct adreno_perfcount_register a6xx_perfcounters_vpc[] = {
3162 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_0_LO,
3163 A6XX_RBBM_PERFCTR_VPC_0_HI, 40, A6XX_VPC_PERFCTR_VPC_SEL_0 },
3164 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_1_LO,
3165 A6XX_RBBM_PERFCTR_VPC_1_HI, 41, A6XX_VPC_PERFCTR_VPC_SEL_1 },
3166 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_2_LO,
3167 A6XX_RBBM_PERFCTR_VPC_2_HI, 42, A6XX_VPC_PERFCTR_VPC_SEL_2 },
3168 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_3_LO,
3169 A6XX_RBBM_PERFCTR_VPC_3_HI, 43, A6XX_VPC_PERFCTR_VPC_SEL_3 },
3170 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_4_LO,
3171 A6XX_RBBM_PERFCTR_VPC_4_HI, 44, A6XX_VPC_PERFCTR_VPC_SEL_4 },
3172 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_5_LO,
3173 A6XX_RBBM_PERFCTR_VPC_5_HI, 45, A6XX_VPC_PERFCTR_VPC_SEL_5 },
3174};
3175
3176static struct adreno_perfcount_register a6xx_perfcounters_ccu[] = {
3177 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_0_LO,
3178 A6XX_RBBM_PERFCTR_CCU_0_HI, 46, A6XX_RB_PERFCTR_CCU_SEL_0 },
3179 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_1_LO,
3180 A6XX_RBBM_PERFCTR_CCU_1_HI, 47, A6XX_RB_PERFCTR_CCU_SEL_1 },
3181 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_2_LO,
3182 A6XX_RBBM_PERFCTR_CCU_2_HI, 48, A6XX_RB_PERFCTR_CCU_SEL_2 },
3183 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_3_LO,
3184 A6XX_RBBM_PERFCTR_CCU_3_HI, 49, A6XX_RB_PERFCTR_CCU_SEL_3 },
3185 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_4_LO,
3186 A6XX_RBBM_PERFCTR_CCU_4_HI, 50, A6XX_RB_PERFCTR_CCU_SEL_4 },
3187};
3188
3189static struct adreno_perfcount_register a6xx_perfcounters_tse[] = {
3190 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_0_LO,
3191 A6XX_RBBM_PERFCTR_TSE_0_HI, 51, A6XX_GRAS_PERFCTR_TSE_SEL_0 },
3192 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_1_LO,
3193 A6XX_RBBM_PERFCTR_TSE_1_HI, 52, A6XX_GRAS_PERFCTR_TSE_SEL_1 },
3194 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_2_LO,
3195 A6XX_RBBM_PERFCTR_TSE_2_HI, 53, A6XX_GRAS_PERFCTR_TSE_SEL_2 },
3196 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_3_LO,
3197 A6XX_RBBM_PERFCTR_TSE_3_HI, 54, A6XX_GRAS_PERFCTR_TSE_SEL_3 },
3198};
3199
3200static struct adreno_perfcount_register a6xx_perfcounters_ras[] = {
3201 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_0_LO,
3202 A6XX_RBBM_PERFCTR_RAS_0_HI, 55, A6XX_GRAS_PERFCTR_RAS_SEL_0 },
3203 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_1_LO,
3204 A6XX_RBBM_PERFCTR_RAS_1_HI, 56, A6XX_GRAS_PERFCTR_RAS_SEL_1 },
3205 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_2_LO,
3206 A6XX_RBBM_PERFCTR_RAS_2_HI, 57, A6XX_GRAS_PERFCTR_RAS_SEL_2 },
3207 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_3_LO,
3208 A6XX_RBBM_PERFCTR_RAS_3_HI, 58, A6XX_GRAS_PERFCTR_RAS_SEL_3 },
3209};
3210
3211static struct adreno_perfcount_register a6xx_perfcounters_uche[] = {
3212 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_0_LO,
3213 A6XX_RBBM_PERFCTR_UCHE_0_HI, 59, A6XX_UCHE_PERFCTR_UCHE_SEL_0 },
3214 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_1_LO,
3215 A6XX_RBBM_PERFCTR_UCHE_1_HI, 60, A6XX_UCHE_PERFCTR_UCHE_SEL_1 },
3216 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_2_LO,
3217 A6XX_RBBM_PERFCTR_UCHE_2_HI, 61, A6XX_UCHE_PERFCTR_UCHE_SEL_2 },
3218 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_3_LO,
3219 A6XX_RBBM_PERFCTR_UCHE_3_HI, 62, A6XX_UCHE_PERFCTR_UCHE_SEL_3 },
3220 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_4_LO,
3221 A6XX_RBBM_PERFCTR_UCHE_4_HI, 63, A6XX_UCHE_PERFCTR_UCHE_SEL_4 },
3222 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_5_LO,
3223 A6XX_RBBM_PERFCTR_UCHE_5_HI, 64, A6XX_UCHE_PERFCTR_UCHE_SEL_5 },
3224 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_6_LO,
3225 A6XX_RBBM_PERFCTR_UCHE_6_HI, 65, A6XX_UCHE_PERFCTR_UCHE_SEL_6 },
3226 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_7_LO,
3227 A6XX_RBBM_PERFCTR_UCHE_7_HI, 66, A6XX_UCHE_PERFCTR_UCHE_SEL_7 },
3228 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_8_LO,
3229 A6XX_RBBM_PERFCTR_UCHE_8_HI, 67, A6XX_UCHE_PERFCTR_UCHE_SEL_8 },
3230 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_9_LO,
3231 A6XX_RBBM_PERFCTR_UCHE_9_HI, 68, A6XX_UCHE_PERFCTR_UCHE_SEL_9 },
3232 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_10_LO,
3233 A6XX_RBBM_PERFCTR_UCHE_10_HI, 69,
3234 A6XX_UCHE_PERFCTR_UCHE_SEL_10 },
3235 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_11_LO,
3236 A6XX_RBBM_PERFCTR_UCHE_11_HI, 70,
3237 A6XX_UCHE_PERFCTR_UCHE_SEL_11 },
3238};
3239
3240static struct adreno_perfcount_register a6xx_perfcounters_tp[] = {
3241 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_0_LO,
3242 A6XX_RBBM_PERFCTR_TP_0_HI, 71, A6XX_TPL1_PERFCTR_TP_SEL_0 },
3243 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_1_LO,
3244 A6XX_RBBM_PERFCTR_TP_1_HI, 72, A6XX_TPL1_PERFCTR_TP_SEL_1 },
3245 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_2_LO,
3246 A6XX_RBBM_PERFCTR_TP_2_HI, 73, A6XX_TPL1_PERFCTR_TP_SEL_2 },
3247 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_3_LO,
3248 A6XX_RBBM_PERFCTR_TP_3_HI, 74, A6XX_TPL1_PERFCTR_TP_SEL_3 },
3249 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_4_LO,
3250 A6XX_RBBM_PERFCTR_TP_4_HI, 75, A6XX_TPL1_PERFCTR_TP_SEL_4 },
3251 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_5_LO,
3252 A6XX_RBBM_PERFCTR_TP_5_HI, 76, A6XX_TPL1_PERFCTR_TP_SEL_5 },
3253 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_6_LO,
3254 A6XX_RBBM_PERFCTR_TP_6_HI, 77, A6XX_TPL1_PERFCTR_TP_SEL_6 },
3255 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_7_LO,
3256 A6XX_RBBM_PERFCTR_TP_7_HI, 78, A6XX_TPL1_PERFCTR_TP_SEL_7 },
3257 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_8_LO,
3258 A6XX_RBBM_PERFCTR_TP_8_HI, 79, A6XX_TPL1_PERFCTR_TP_SEL_8 },
3259 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_9_LO,
3260 A6XX_RBBM_PERFCTR_TP_9_HI, 80, A6XX_TPL1_PERFCTR_TP_SEL_9 },
3261 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_10_LO,
3262 A6XX_RBBM_PERFCTR_TP_10_HI, 81, A6XX_TPL1_PERFCTR_TP_SEL_10 },
3263 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_11_LO,
3264 A6XX_RBBM_PERFCTR_TP_11_HI, 82, A6XX_TPL1_PERFCTR_TP_SEL_11 },
3265};
3266
3267static struct adreno_perfcount_register a6xx_perfcounters_sp[] = {
3268 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_0_LO,
3269 A6XX_RBBM_PERFCTR_SP_0_HI, 83, A6XX_SP_PERFCTR_SP_SEL_0 },
3270 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_1_LO,
3271 A6XX_RBBM_PERFCTR_SP_1_HI, 84, A6XX_SP_PERFCTR_SP_SEL_1 },
3272 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_2_LO,
3273 A6XX_RBBM_PERFCTR_SP_2_HI, 85, A6XX_SP_PERFCTR_SP_SEL_2 },
3274 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_3_LO,
3275 A6XX_RBBM_PERFCTR_SP_3_HI, 86, A6XX_SP_PERFCTR_SP_SEL_3 },
3276 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_4_LO,
3277 A6XX_RBBM_PERFCTR_SP_4_HI, 87, A6XX_SP_PERFCTR_SP_SEL_4 },
3278 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_5_LO,
3279 A6XX_RBBM_PERFCTR_SP_5_HI, 88, A6XX_SP_PERFCTR_SP_SEL_5 },
3280 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_6_LO,
3281 A6XX_RBBM_PERFCTR_SP_6_HI, 89, A6XX_SP_PERFCTR_SP_SEL_6 },
3282 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_7_LO,
3283 A6XX_RBBM_PERFCTR_SP_7_HI, 90, A6XX_SP_PERFCTR_SP_SEL_7 },
3284 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_8_LO,
3285 A6XX_RBBM_PERFCTR_SP_8_HI, 91, A6XX_SP_PERFCTR_SP_SEL_8 },
3286 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_9_LO,
3287 A6XX_RBBM_PERFCTR_SP_9_HI, 92, A6XX_SP_PERFCTR_SP_SEL_9 },
3288 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_10_LO,
3289 A6XX_RBBM_PERFCTR_SP_10_HI, 93, A6XX_SP_PERFCTR_SP_SEL_10 },
3290 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_11_LO,
3291 A6XX_RBBM_PERFCTR_SP_11_HI, 94, A6XX_SP_PERFCTR_SP_SEL_11 },
3292 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_12_LO,
3293 A6XX_RBBM_PERFCTR_SP_12_HI, 95, A6XX_SP_PERFCTR_SP_SEL_12 },
3294 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_13_LO,
3295 A6XX_RBBM_PERFCTR_SP_13_HI, 96, A6XX_SP_PERFCTR_SP_SEL_13 },
3296 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_14_LO,
3297 A6XX_RBBM_PERFCTR_SP_14_HI, 97, A6XX_SP_PERFCTR_SP_SEL_14 },
3298 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_15_LO,
3299 A6XX_RBBM_PERFCTR_SP_15_HI, 98, A6XX_SP_PERFCTR_SP_SEL_15 },
3300 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_16_LO,
3301 A6XX_RBBM_PERFCTR_SP_16_HI, 99, A6XX_SP_PERFCTR_SP_SEL_16 },
3302 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_17_LO,
3303 A6XX_RBBM_PERFCTR_SP_17_HI, 100, A6XX_SP_PERFCTR_SP_SEL_17 },
3304 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_18_LO,
3305 A6XX_RBBM_PERFCTR_SP_18_HI, 101, A6XX_SP_PERFCTR_SP_SEL_18 },
3306 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_19_LO,
3307 A6XX_RBBM_PERFCTR_SP_19_HI, 102, A6XX_SP_PERFCTR_SP_SEL_19 },
3308 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_20_LO,
3309 A6XX_RBBM_PERFCTR_SP_20_HI, 103, A6XX_SP_PERFCTR_SP_SEL_20 },
3310 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_21_LO,
3311 A6XX_RBBM_PERFCTR_SP_21_HI, 104, A6XX_SP_PERFCTR_SP_SEL_21 },
3312 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_22_LO,
3313 A6XX_RBBM_PERFCTR_SP_22_HI, 105, A6XX_SP_PERFCTR_SP_SEL_22 },
3314 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_23_LO,
3315 A6XX_RBBM_PERFCTR_SP_23_HI, 106, A6XX_SP_PERFCTR_SP_SEL_23 },
3316};
3317
3318static struct adreno_perfcount_register a6xx_perfcounters_rb[] = {
3319 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_0_LO,
3320 A6XX_RBBM_PERFCTR_RB_0_HI, 107, A6XX_RB_PERFCTR_RB_SEL_0 },
3321 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_1_LO,
3322 A6XX_RBBM_PERFCTR_RB_1_HI, 108, A6XX_RB_PERFCTR_RB_SEL_1 },
3323 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_2_LO,
3324 A6XX_RBBM_PERFCTR_RB_2_HI, 109, A6XX_RB_PERFCTR_RB_SEL_2 },
3325 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_3_LO,
3326 A6XX_RBBM_PERFCTR_RB_3_HI, 110, A6XX_RB_PERFCTR_RB_SEL_3 },
3327 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_4_LO,
3328 A6XX_RBBM_PERFCTR_RB_4_HI, 111, A6XX_RB_PERFCTR_RB_SEL_4 },
3329 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_5_LO,
3330 A6XX_RBBM_PERFCTR_RB_5_HI, 112, A6XX_RB_PERFCTR_RB_SEL_5 },
3331 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_6_LO,
3332 A6XX_RBBM_PERFCTR_RB_6_HI, 113, A6XX_RB_PERFCTR_RB_SEL_6 },
3333 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_7_LO,
3334 A6XX_RBBM_PERFCTR_RB_7_HI, 114, A6XX_RB_PERFCTR_RB_SEL_7 },
3335};
3336
3337static struct adreno_perfcount_register a6xx_perfcounters_vsc[] = {
3338 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_0_LO,
3339 A6XX_RBBM_PERFCTR_VSC_0_HI, 115, A6XX_VSC_PERFCTR_VSC_SEL_0 },
3340 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_1_LO,
3341 A6XX_RBBM_PERFCTR_VSC_1_HI, 116, A6XX_VSC_PERFCTR_VSC_SEL_1 },
3342};
3343
3344static struct adreno_perfcount_register a6xx_perfcounters_lrz[] = {
3345 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_0_LO,
3346 A6XX_RBBM_PERFCTR_LRZ_0_HI, 117, A6XX_GRAS_PERFCTR_LRZ_SEL_0 },
3347 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_1_LO,
3348 A6XX_RBBM_PERFCTR_LRZ_1_HI, 118, A6XX_GRAS_PERFCTR_LRZ_SEL_1 },
3349 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_2_LO,
3350 A6XX_RBBM_PERFCTR_LRZ_2_HI, 119, A6XX_GRAS_PERFCTR_LRZ_SEL_2 },
3351 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_3_LO,
3352 A6XX_RBBM_PERFCTR_LRZ_3_HI, 120, A6XX_GRAS_PERFCTR_LRZ_SEL_3 },
3353};
3354
3355static struct adreno_perfcount_register a6xx_perfcounters_cmp[] = {
3356 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_0_LO,
3357 A6XX_RBBM_PERFCTR_CMP_0_HI, 121, A6XX_RB_PERFCTR_CMP_SEL_0 },
3358 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_1_LO,
3359 A6XX_RBBM_PERFCTR_CMP_1_HI, 122, A6XX_RB_PERFCTR_CMP_SEL_1 },
3360 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_2_LO,
3361 A6XX_RBBM_PERFCTR_CMP_2_HI, 123, A6XX_RB_PERFCTR_CMP_SEL_2 },
3362 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_3_LO,
3363 A6XX_RBBM_PERFCTR_CMP_3_HI, 124, A6XX_RB_PERFCTR_CMP_SEL_3 },
3364};
3365
3366static struct adreno_perfcount_register a6xx_perfcounters_vbif[] = {
3367 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW0,
3368 A6XX_VBIF_PERF_CNT_HIGH0, -1, A6XX_VBIF_PERF_CNT_SEL0 },
3369 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW1,
3370 A6XX_VBIF_PERF_CNT_HIGH1, -1, A6XX_VBIF_PERF_CNT_SEL1 },
3371 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW2,
3372 A6XX_VBIF_PERF_CNT_HIGH2, -1, A6XX_VBIF_PERF_CNT_SEL2 },
3373 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW3,
3374 A6XX_VBIF_PERF_CNT_HIGH3, -1, A6XX_VBIF_PERF_CNT_SEL3 },
3375};
3376
3377static struct adreno_perfcount_register a6xx_perfcounters_vbif_pwr[] = {
3378 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW0,
3379 A6XX_VBIF_PERF_PWR_CNT_HIGH0, -1, A6XX_VBIF_PERF_PWR_CNT_EN0 },
3380 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW1,
3381 A6XX_VBIF_PERF_PWR_CNT_HIGH1, -1, A6XX_VBIF_PERF_PWR_CNT_EN1 },
3382 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW2,
3383 A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
3384};
3385
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05303386
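/*
 * Note: unlike the VBIF counters above, all four GBIF performance
 * counters share the single A6XX_GBIF_PERF_CNT_SEL select register.
 */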
3387static struct adreno_perfcount_register a6xx_perfcounters_gbif[] = {
3388 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW0,
3389 A6XX_GBIF_PERF_CNT_HIGH0, -1, A6XX_GBIF_PERF_CNT_SEL },
3390 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW1,
3391 A6XX_GBIF_PERF_CNT_HIGH1, -1, A6XX_GBIF_PERF_CNT_SEL },
3392 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW2,
3393 A6XX_GBIF_PERF_CNT_HIGH2, -1, A6XX_GBIF_PERF_CNT_SEL },
3394 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW3,
3395 A6XX_GBIF_PERF_CNT_HIGH3, -1, A6XX_GBIF_PERF_CNT_SEL },
3396};
3397
3398static struct adreno_perfcount_register a6xx_perfcounters_gbif_pwr[] = {
3399 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW0,
3400 A6XX_GBIF_PWR_CNT_HIGH0, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
3401 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW1,
3402 A6XX_GBIF_PWR_CNT_HIGH1, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
3403 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW2,
3404 A6XX_GBIF_PWR_CNT_HIGH2, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
3405};
3406
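/*
 * PWR group: slot 0 is marked broken so it is never handed out; slot 1
 * is the GMU XOCLK_0 counter that a6xx_enable_pwr_counters() programs
 * to count GPU busy cycles.
 */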
Lynus Vaz856ca602017-05-24 16:56:36 +05303407static struct adreno_perfcount_register a6xx_perfcounters_pwr[] = {
3408 { KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
3409 { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
3410 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
3411 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1, 0 },
3412};
3413
Lynus Vaz107d2892017-03-01 13:48:06 +05303414static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
3415 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
3416 A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
3417};
3418
Lynus Vaz4fc97e22017-06-01 20:03:35 +05303419static struct adreno_perfcount_register a6xx_pwrcounters_gpmu[] = {
3420 /*
3421 * A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0 is used for the GPU
3422 * busy count (see the PWR group above). Mark it as broken
3423 * so it's not re-used.
3424 */
3425 { KGSL_PERFCOUNTER_BROKEN, 0, 0,
3426 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
3427 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1,
3428 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
3429 { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
3430 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L,
3431 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H, -1,
3432 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
3433 { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
3434 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L,
3435 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H, -1,
3436 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
3437 { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
3438 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L,
3439 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H, -1,
3440 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
3441 { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
3442 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L,
3443 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H, -1,
3444 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
3445 { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
3446 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L,
3447 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H, -1,
3448 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
3449};
3450
Tarun Karra1382e512017-10-30 19:41:25 -07003451/*
 3452 * The ADRENO_PERFCOUNTER_GROUP_RESTORE flag is enabled by default
 3453 * because most perfcounter groups need to be restored as part of
 3454 * preemption and IFPC. Groups that should not be restored across
 3455 * preemption and IFPC must be defined with the
 3456 * A6XX_PERFCOUNTER_GROUP_FLAGS macro instead.
 3457 */
Lynus Vaz107d2892017-03-01 13:48:06 +05303458#define A6XX_PERFCOUNTER_GROUP(offset, name) \
Tarun Karra1382e512017-10-30 19:41:25 -07003459 ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, \
3460 ADRENO_PERFCOUNTER_GROUP_RESTORE)
Lynus Vaz107d2892017-03-01 13:48:06 +05303461
3462#define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
3463 ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
3464
Lynus Vaz4fc97e22017-06-01 20:03:35 +05303465#define A6XX_POWER_COUNTER_GROUP(offset, name) \
3466 ADRENO_POWER_COUNTER_GROUP(a6xx, offset, name)
3467
Lynus Vaz107d2892017-03-01 13:48:06 +05303468static struct adreno_perfcount_group a6xx_perfcounter_groups
3469 [KGSL_PERFCOUNTER_GROUP_MAX] = {
3470 A6XX_PERFCOUNTER_GROUP(CP, cp),
Tarun Karra1382e512017-10-30 19:41:25 -07003471 A6XX_PERFCOUNTER_GROUP_FLAGS(RBBM, rbbm, 0),
Lynus Vaz107d2892017-03-01 13:48:06 +05303472 A6XX_PERFCOUNTER_GROUP(PC, pc),
3473 A6XX_PERFCOUNTER_GROUP(VFD, vfd),
3474 A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq),
3475 A6XX_PERFCOUNTER_GROUP(VPC, vpc),
3476 A6XX_PERFCOUNTER_GROUP(CCU, ccu),
3477 A6XX_PERFCOUNTER_GROUP(CMP, cmp),
3478 A6XX_PERFCOUNTER_GROUP(TSE, tse),
3479 A6XX_PERFCOUNTER_GROUP(RAS, ras),
3480 A6XX_PERFCOUNTER_GROUP(LRZ, lrz),
3481 A6XX_PERFCOUNTER_GROUP(UCHE, uche),
3482 A6XX_PERFCOUNTER_GROUP(TP, tp),
3483 A6XX_PERFCOUNTER_GROUP(SP, sp),
3484 A6XX_PERFCOUNTER_GROUP(RB, rb),
3485 A6XX_PERFCOUNTER_GROUP(VSC, vsc),
Tarun Karra1382e512017-10-30 19:41:25 -07003486 A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF, vbif, 0),
Lynus Vaz107d2892017-03-01 13:48:06 +05303487 A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
3488 ADRENO_PERFCOUNTER_GROUP_FIXED),
Lynus Vaz856ca602017-05-24 16:56:36 +05303489 A6XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
3490 ADRENO_PERFCOUNTER_GROUP_FIXED),
Lynus Vaz107d2892017-03-01 13:48:06 +05303491 A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
3492 ADRENO_PERFCOUNTER_GROUP_FIXED),
Lynus Vaz4fc97e22017-06-01 20:03:35 +05303493 A6XX_POWER_COUNTER_GROUP(GPMU, gpmu),
Lynus Vaz107d2892017-03-01 13:48:06 +05303494};
3495
3496static struct adreno_perfcounters a6xx_perfcounters = {
3497 a6xx_perfcounter_groups,
3498 ARRAY_SIZE(a6xx_perfcounter_groups),
3499};
3500
Lynus Vaz856ca602017-05-24 16:56:36 +05303501/* Program the GMU power counter to count GPU busy cycles */
3502static int a6xx_enable_pwr_counters(struct adreno_device *adreno_dev,
3503 unsigned int counter)
3504{
3505 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
3506
 3507	/*
 3508	 * We have a limited number of power counters. Since we're not using
 3509	 * the total GPU cycle count, return an error if it is requested.
 3510	 */
3511 if (counter == 0)
3512 return -EINVAL;
3513
3514 if (!device->gmu.pdev)
3515 return -ENODEV;
3516
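	/*
	 * Set the CX busy mask, select countable 0x20 (GPU busy cycles) for
	 * power counter XOCLK_0, then enable the GMU power counters.
	 */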
Kyle Piefer50af7d02017-07-25 11:00:17 -07003517 kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xFF000000);
Lynus Vaz856ca602017-05-24 16:56:36 +05303518 kgsl_regrmw(device,
Kyle Piefer50af7d02017-07-25 11:00:17 -07003519 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xFF, 0x20);
Lynus Vaz856ca602017-05-24 16:56:36 +05303520 kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
3521
3522 return 0;
3523}
3524
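/*
 * The "qcom,gpu-speed-bin" DT property is expected to hold three cells:
 * <fuse offset, bit mask, bit shift>. A purely illustrative example:
 *
 *	qcom,gpu-speed-bin = <0x41a0 0x1fe00000 21>;
 *
 * would read the fuse word at offset 0x41a0, mask bits 28:21 and shift
 * them down to produce adreno_dev->speed_bin.
 */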
Rajesh Kemisetti10bbec92017-10-20 10:55:58 +05303525static void a6xx_efuse_speed_bin(struct adreno_device *adreno_dev)
3526{
3527 unsigned int val;
3528 unsigned int speed_bin[3];
3529 struct kgsl_device *device = &adreno_dev->dev;
3530
3531 if (of_property_read_u32_array(device->pdev->dev.of_node,
3532 "qcom,gpu-speed-bin", speed_bin, 3))
3533 return;
3534
3535 adreno_efuse_read_u32(adreno_dev, speed_bin[0], &val);
3536
3537 adreno_dev->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
3538}
3539
3540static const struct {
3541 int (*check)(struct adreno_device *adreno_dev);
3542 void (*func)(struct adreno_device *adreno_dev);
3543} a6xx_efuse_funcs[] = {
3544 { adreno_is_a615, a6xx_efuse_speed_bin },
3545};
3546
3547static void a6xx_check_features(struct adreno_device *adreno_dev)
3548{
3549 unsigned int i;
3550
3551 if (adreno_efuse_map(adreno_dev))
3552 return;
3553 for (i = 0; i < ARRAY_SIZE(a6xx_efuse_funcs); i++) {
3554 if (a6xx_efuse_funcs[i].check(adreno_dev))
3555 a6xx_efuse_funcs[i].func(adreno_dev);
3556 }
3557
3558 adreno_efuse_unmap(adreno_dev);
3559}
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05303560static void a6xx_platform_setup(struct adreno_device *adreno_dev)
3561{
3562 uint64_t addr;
3563 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
3564
3565 /* Calculate SP local and private mem addresses */
3566 addr = ALIGN(ADRENO_UCHE_GMEM_BASE + adreno_dev->gmem_size, SZ_64K);
3567 adreno_dev->sp_local_gpuaddr = addr;
3568 adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
3569
3570 if (adreno_has_gbif(adreno_dev)) {
3571 a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF].regs =
3572 a6xx_perfcounters_gbif;
3573 a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF].reg_count
3574 = ARRAY_SIZE(a6xx_perfcounters_gbif);
3575
3576 a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs =
3577 a6xx_perfcounters_gbif_pwr;
Deepak Kumar84b9e032017-11-08 13:08:50 +05303578 a6xx_perfcounter_groups[
3579 KGSL_PERFCOUNTER_GROUP_VBIF_PWR].reg_count
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05303580 = ARRAY_SIZE(a6xx_perfcounters_gbif_pwr);
3581
3582 gpudev->vbif_xin_halt_ctrl0_mask =
3583 A6XX_GBIF_HALT_MASK;
3584 } else
3585 gpudev->vbif_xin_halt_ctrl0_mask =
3586 A6XX_VBIF_XIN_HALT_CTRL0_MASK;
Rajesh Kemisetti10bbec92017-10-20 10:55:58 +05303587
 3588	/* Check efuse bits for various capabilities */
3589 a6xx_check_features(adreno_dev);
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05303590}
3591
3592
Harshdeep Dhatt6ba7a942017-08-21 17:53:52 -06003593static unsigned int a6xx_ccu_invalidate(struct adreno_device *adreno_dev,
3594 unsigned int *cmds)
3595{
3596 /* CCU_INVALIDATE_DEPTH */
3597 *cmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
3598 *cmds++ = 24;
3599
3600 /* CCU_INVALIDATE_COLOR */
3601 *cmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
3602 *cmds++ = 25;
3603
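	/* Return the number of dwords written so the caller can advance cmds */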
3604 return 4;
3605}
3606
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003607/* Register offset defines for A6XX, in order of enum adreno_regs */
3608static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
3609
3610 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A6XX_CP_RB_BASE),
Shrenuj Bansal41665402016-12-16 15:25:54 -08003611 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A6XX_CP_RB_BASE_HI),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003612 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
3613 A6XX_CP_RB_RPTR_ADDR_LO),
3614 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
3615 A6XX_CP_RB_RPTR_ADDR_HI),
3616 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A6XX_CP_RB_RPTR),
3617 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A6XX_CP_RB_WPTR),
3618 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A6XX_CP_RB_CNTL),
Shrenuj Bansal41665402016-12-16 15:25:54 -08003619 ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A6XX_CP_SQE_CNTL),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003620 ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A6XX_CP_MISC_CNTL),
Carter Cooper8567af02017-03-15 14:22:03 -06003621 ADRENO_REG_DEFINE(ADRENO_REG_CP_HW_FAULT, A6XX_CP_HW_FAULT),
Shrenuj Bansal41665402016-12-16 15:25:54 -08003622 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A6XX_CP_IB1_BASE),
3623 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE_HI, A6XX_CP_IB1_BASE_HI),
3624 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A6XX_CP_IB1_REM_SIZE),
3625 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A6XX_CP_IB2_BASE),
3626 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE_HI, A6XX_CP_IB2_BASE_HI),
3627 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A6XX_CP_IB2_REM_SIZE),
3628 ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_ADDR, A6XX_CP_ROQ_DBG_ADDR),
3629 ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_DATA, A6XX_CP_ROQ_DBG_DATA),
Harshdeep Dhatt0cdc8992017-05-31 15:44:05 -06003630 ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT, A6XX_CP_CONTEXT_SWITCH_CNTL),
3631 ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
3632 A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO),
3633 ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
3634 A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI),
Harshdeep Dhatt59a69572017-11-01 14:46:13 -06003635 ADRENO_REG_DEFINE(
3636 ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
3637 A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO),
3638 ADRENO_REG_DEFINE(
3639 ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
3640 A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI),
3641 ADRENO_REG_DEFINE(
3642 ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
3643 A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO),
3644 ADRENO_REG_DEFINE(
3645 ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
3646 A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI),
3647 ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
3648 A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO),
3649 ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
3650 A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI),
Harshdeep Dhatt003f6cf2017-12-14 11:00:22 -07003651 ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT_LEVEL_STATUS,
3652 A6XX_CP_CONTEXT_SWITCH_LEVEL_STATUS),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003653 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
3654 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),
Lynus Vaz107d2892017-03-01 13:48:06 +05303655 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL),
3656 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
3657 A6XX_RBBM_PERFCTR_LOAD_CMD0),
3658 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
3659 A6XX_RBBM_PERFCTR_LOAD_CMD1),
3660 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
3661 A6XX_RBBM_PERFCTR_LOAD_CMD2),
3662 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD3,
3663 A6XX_RBBM_PERFCTR_LOAD_CMD3),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003664
3665 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A6XX_RBBM_INT_0_MASK),
3666 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A6XX_RBBM_INT_0_STATUS),
3667 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_CLOCK_CTL, A6XX_RBBM_CLOCK_CNTL),
3668 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_CLEAR_CMD,
3669 A6XX_RBBM_INT_CLEAR_CMD),
3670 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A6XX_RBBM_SW_RESET_CMD),
3671 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD,
3672 A6XX_RBBM_BLOCK_SW_RESET_CMD),
3673 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
3674 A6XX_RBBM_BLOCK_SW_RESET_CMD2),
Lynus Vaz107d2892017-03-01 13:48:06 +05303675 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
3676 A6XX_RBBM_PERFCTR_LOAD_VALUE_LO),
3677 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
3678 A6XX_RBBM_PERFCTR_LOAD_VALUE_HI),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003679 ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION, A6XX_VBIF_VERSION),
Carter Cooperafc85912017-03-20 09:39:18 -06003680 ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
3681 A6XX_VBIF_XIN_HALT_CTRL0),
3682 ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
3683 A6XX_VBIF_XIN_HALT_CTRL1),
Rajesh Kemisettid1ca9542017-10-18 15:35:41 +05303684 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_GPR0_CNTL, A6XX_RBBM_GPR0_CNTL),
3685 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
3686 A6XX_RBBM_VBIF_GX_RESET_STATUS),
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05303687 ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT, A6XX_GBIF_HALT),
3688 ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT_ACK, A6XX_GBIF_HALT_ACK),
Kyle Pieferb1027b02017-02-10 13:58:58 -08003689 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
3690 A6XX_GMU_ALWAYS_ON_COUNTER_L),
3691 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
3692 A6XX_GMU_ALWAYS_ON_COUNTER_H),
Kyle Pieferda0fa542017-08-04 13:39:40 -07003693 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
3694 A6XX_GMU_AO_AHB_FENCE_CTRL),
Kyle Pieferb1027b02017-02-10 13:58:58 -08003695 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_INTERRUPT_EN,
3696 A6XX_GMU_AO_INTERRUPT_EN),
Kyle Piefere7b06b42017-04-06 13:53:01 -07003697 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
3698 A6XX_GMU_AO_HOST_INTERRUPT_CLR),
3699 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
3700 A6XX_GMU_AO_HOST_INTERRUPT_STATUS),
3701 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
3702 A6XX_GMU_AO_HOST_INTERRUPT_MASK),
Kyle Pieferb1027b02017-02-10 13:58:58 -08003703 ADRENO_REG_DEFINE(ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
3704 A6XX_GMU_GMU_PWR_COL_KEEPALIVE),
3705 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AHB_FENCE_STATUS,
3706 A6XX_GMU_AHB_FENCE_STATUS),
3707 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_CTRL_STATUS,
3708 A6XX_GMU_HFI_CTRL_STATUS),
3709 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_VERSION_INFO,
3710 A6XX_GMU_HFI_VERSION_INFO),
3711 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_SFR_ADDR,
3712 A6XX_GMU_HFI_SFR_ADDR),
3713 ADRENO_REG_DEFINE(ADRENO_REG_GMU_RPMH_POWER_STATE,
George Shenf2d4e052017-05-11 16:28:23 -07003714 A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE),
Kyle Pieferb1027b02017-02-10 13:58:58 -08003715 ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
3716 A6XX_GMU_GMU2HOST_INTR_CLR),
3717 ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
3718 A6XX_GMU_GMU2HOST_INTR_INFO),
Kyle Piefere7b06b42017-04-06 13:53:01 -07003719 ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
3720 A6XX_GMU_GMU2HOST_INTR_MASK),
Kyle Pieferb1027b02017-02-10 13:58:58 -08003721 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_SET,
3722 A6XX_GMU_HOST2GMU_INTR_SET),
3723 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
3724 A6XX_GMU_HOST2GMU_INTR_CLR),
3725 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
3726 A6XX_GMU_HOST2GMU_INTR_RAW_INFO),
George Shen6927d8f2017-07-19 11:38:10 -07003727 ADRENO_REG_DEFINE(ADRENO_REG_GMU_NMI_CONTROL_STATUS,
3728 A6XX_GMU_NMI_CONTROL_STATUS),
3729 ADRENO_REG_DEFINE(ADRENO_REG_GMU_CM3_CFG,
3730 A6XX_GMU_CM3_CFG),
Carter Cooper4a313ae2017-02-23 11:11:56 -07003731 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
3732 A6XX_RBBM_SECVID_TRUST_CNTL),
3733 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
3734 A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO),
3735 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
3736 A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI),
3737 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
3738 A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE),
3739 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
3740 A6XX_RBBM_SECVID_TSB_CNTL),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003741};
3742
3743static const struct adreno_reg_offsets a6xx_reg_offsets = {
3744 .offsets = a6xx_register_offsets,
3745 .offset_0 = ADRENO_REG_REGISTER_MAX,
3746};
3747
Tarun Karra1382e512017-10-30 19:41:25 -07003748static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
3749 struct adreno_perfcount_register *reg, bool update_reg)
3750{
3751 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
3752 struct cpu_gpu_lock *lock = adreno_dev->pwrup_reglist.hostptr;
3753 struct reg_list_pair *reg_pair = (struct reg_list_pair *)(lock + 1);
3754 unsigned int i;
3755 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
3756 int ret = 0;
3757
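	/*
	 * Take the KMD side of the two-party (Peterson-style) lock shared
	 * with the GPU ucode: raise flag_kmd, give away the turn, then spin
	 * below until the ucode side no longer holds the lock.
	 */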
3758 lock->flag_kmd = 1;
3759 /* Write flag_kmd before turn */
3760 wmb();
3761 lock->turn = 0;
3762 /* Write these fields before looping */
3763 mb();
3764
 3765	/*
 3766	 * Spin here while the GPU ucode holds the lock; lock->flag_ucode is
 3767	 * cleared to 0 once the ucode releases it. Allow the ucode at least
 3768	 * 1 second to release the lock before giving up.
 3769	 */
3770 while (lock->flag_ucode == 1 && lock->turn == 0) {
3771 cpu_relax();
3772 /* Get the latest updates from GPU */
3773 rmb();
 3774		/*
 3775		 * We have waited at least 1 second for the lock; if the
 3776		 * ucode still holds it, return an error.
 3777		 */
3778 if (time_after(jiffies, timeout) &&
3779 (lock->flag_ucode == 1 && lock->turn == 0)) {
3780 ret = -EBUSY;
3781 goto unlock;
3782 }
3783 }
3784
3785 /* Read flag_ucode and turn before list_length */
3786 rmb();
3787 /*
3788 * If the perfcounter select register is already present in reglist
3789 * update it, otherwise append the <select register, value> pair to
3790 * the end of the list.
3791 */
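	/* list_length counts dwords; each entry is an <offset, value> pair */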
3792 for (i = 0; i < lock->list_length >> 1; i++)
3793 if (reg_pair[i].offset == reg->select)
3794 break;
3795
3796 reg_pair[i].offset = reg->select;
3797 reg_pair[i].val = reg->countable;
3798 if (i == lock->list_length >> 1)
3799 lock->list_length += 2;
3800
3801 if (update_reg)
3802 kgsl_regwrite(device, reg->select, reg->countable);
3803
3804unlock:
3805 /* All writes done before releasing the lock */
3806 wmb();
3807 lock->flag_kmd = 0;
3808 return ret;
3809}
3810
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003811struct adreno_gpudev adreno_a6xx_gpudev = {
3812 .reg_offsets = &a6xx_reg_offsets,
3813 .start = a6xx_start,
Shrenuj Bansal41665402016-12-16 15:25:54 -08003814 .snapshot = a6xx_snapshot,
Carter Cooperb88b7082017-09-14 09:03:26 -06003815 .snapshot_gmu = a6xx_snapshot_gmu,
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003816 .irq = &a6xx_irq,
Shrenuj Bansal41665402016-12-16 15:25:54 -08003817 .snapshot_data = &a6xx_snapshot_data,
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003818 .irq_trace = trace_kgsl_a5xx_irq_status,
3819 .num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
3820 .platform_setup = a6xx_platform_setup,
Shrenuj Bansal41665402016-12-16 15:25:54 -08003821 .init = a6xx_init,
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003822 .rb_start = a6xx_rb_start,
3823 .regulator_enable = a6xx_sptprac_enable,
3824 .regulator_disable = a6xx_sptprac_disable,
Lynus Vaz107d2892017-03-01 13:48:06 +05303825 .perfcounters = &a6xx_perfcounters,
Lynus Vaz856ca602017-05-24 16:56:36 +05303826 .enable_pwr_counters = a6xx_enable_pwr_counters,
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07003827 .count_throttles = a6xx_count_throttles,
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003828 .microcode_read = a6xx_microcode_read,
3829 .enable_64bit = a6xx_enable_64bit,
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06003830 .llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07003831 .llc_configure_gpuhtw_scid = a6xx_llc_configure_gpuhtw_scid,
Kyle Piefer11a48b62017-03-17 14:53:40 -07003832 .llc_enable_overrides = a6xx_llc_enable_overrides,
Kyle Pieferb1027b02017-02-10 13:58:58 -08003833 .oob_set = a6xx_oob_set,
3834 .oob_clear = a6xx_oob_clear,
Carter Cooperdf7ba702017-03-20 11:28:04 -06003835 .gpu_keepalive = a6xx_gpu_keepalive,
Kyle Pieferb1027b02017-02-10 13:58:58 -08003836 .rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
Oleg Perelet62d5cec2017-03-27 16:14:52 -07003837 .hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
Kyle Piefer4033f562017-08-16 10:00:48 -07003838 .wait_for_lowest_idle = a6xx_wait_for_lowest_idle,
Lynus Vaz1fde74d2017-03-20 18:02:47 +05303839 .wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
3840 .iommu_fault_block = a6xx_iommu_fault_block,
Shrenuj Bansald0fe7462017-05-08 16:11:19 -07003841 .reset = a6xx_reset,
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07003842 .soft_reset = a6xx_soft_reset,
Harshdeep Dhatt0cdc8992017-05-31 15:44:05 -06003843 .preemption_pre_ibsubmit = a6xx_preemption_pre_ibsubmit,
3844 .preemption_post_ibsubmit = a6xx_preemption_post_ibsubmit,
3845 .preemption_init = a6xx_preemption_init,
3846 .preemption_schedule = a6xx_preemption_schedule,
Harshdeep Dhattaae850c2017-08-21 17:19:26 -06003847 .set_marker = a6xx_set_marker,
Harshdeep Dhatt2e42f122017-05-31 17:27:19 -06003848 .preemption_context_init = a6xx_preemption_context_init,
3849 .preemption_context_destroy = a6xx_preemption_context_destroy,
Shrenuj Bansald197bf62017-04-07 11:00:09 -07003850 .gx_is_on = a6xx_gx_is_on,
3851 .sptprac_is_on = a6xx_sptprac_is_on,
Harshdeep Dhatt6ba7a942017-08-21 17:53:52 -06003852 .ccu_invalidate = a6xx_ccu_invalidate,
Tarun Karra1382e512017-10-30 19:41:25 -07003853 .perfcounter_update = a6xx_perfcounter_update,
Lokesh Batraa8300e02017-05-25 11:17:40 -07003854 .coresight = {&a6xx_coresight, &a6xx_coresight_cx},
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003855};