/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/pm_opp.h>
#include <linux/jiffies.h>

#include "adreno.h"
#include "a6xx_reg.h"
#include "adreno_a6xx.h"
#include "adreno_cp_parser.h"
#include "adreno_trace.h"
#include "adreno_pm4types.h"
#include "adreno_perfcounter.h"
#include "adreno_ringbuffer.h"
#include "adreno_llc.h"
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl.h"
#include "kgsl_gmu.h"
#include "kgsl_trace.h"

#define MIN_HBB 13

#define GPU_LIMIT_THRESHOLD_ENABLE BIT(31)

static int _load_gmu_firmware(struct kgsl_device *device);

static const struct adreno_vbif_data a630_vbif[] = {
	{A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009},
	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
	{0, 0},
};

static const struct adreno_vbif_data a615_gbif[] = {
	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
	{0, 0},
};

static const struct adreno_vbif_platform a6xx_vbif_platforms[] = {
	{ adreno_is_a630, a630_vbif },
	{ adreno_is_a615, a615_gbif },
	{ adreno_is_a616, a615_gbif },
};


static unsigned long a6xx_oob_state_bitmask;

struct kgsl_hwcg_reg {
	unsigned int off;
	unsigned int val;
};
static const struct kgsl_hwcg_reg a630_hwcg_regs[] = {
	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
	{A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
	{A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
	{A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
	{A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
	{A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
	{A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
	{A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
	{A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
	{A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
	{A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
	{A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
};

static const struct kgsl_hwcg_reg a615_hwcg_regs[] = {
	{A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
	{A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
	{A6XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
	{A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
	{A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
	{A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
	{A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
	{A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
	{A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
	{A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
	{A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
	{A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
	{A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
	{A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
	{A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
	{A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
	{A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040F00},
	{A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040F00},
	{A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
	{A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
	{A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
	{A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
	{A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
	{A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
	{A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
	{A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
	{A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
	{A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
	{A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
	{A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
	{A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
	{A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
	{A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
	{A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
	{A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
	{A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
	{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
};

static const struct {
	int (*devfunc)(struct adreno_device *adreno_dev);
	const struct kgsl_hwcg_reg *regs;
	unsigned int count;
} a6xx_hwcg_registers[] = {
	{adreno_is_a630, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)},
	{adreno_is_a615, a615_hwcg_regs, ARRAY_SIZE(a615_hwcg_regs)},
	{adreno_is_a616, a615_hwcg_regs, ARRAY_SIZE(a615_hwcg_regs)},
};

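/*
 * Note: each entry below is packed into one CP_PROTECT_REG word by
 * a6xx_protect_init() as base | (count << 18) | (read_protect << 31),
 * so 'count' describes the size of the protected window as interpreted
 * by the CP and 'read_protect' additionally blocks reads of that range.
 */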
static struct a6xx_protected_regs {
	unsigned int base;
	unsigned int count;
	int read_protect;
} a6xx_protected_regs_group[] = {
	{ 0x600, 0x51, 0 },
	{ 0xAE50, 0x2, 1 },
	{ 0x9624, 0x13, 1 },
	{ 0x8630, 0x8, 1 },
	{ 0x9E70, 0x1, 1 },
	{ 0x9E78, 0x187, 1 },
	{ 0xF000, 0x810, 1 },
	{ 0xFC00, 0x3, 0 },
	{ 0x50E, 0x0, 1 },
	{ 0x50F, 0x0, 0 },
	{ 0x510, 0x0, 1 },
	{ 0x0, 0x4F9, 0 },
	{ 0x501, 0xA, 0 },
	{ 0x511, 0x44, 0 },
	{ 0xE00, 0x1, 1 },
	{ 0xE03, 0xB, 1 },
	{ 0x8E00, 0x0, 1 },
	{ 0x8E50, 0xF, 1 },
	{ 0xBE02, 0x0, 1 },
	{ 0xBE20, 0x11F3, 1 },
	{ 0x800, 0x82, 1 },
	{ 0x8A0, 0x8, 1 },
	{ 0x8AB, 0x19, 1 },
	{ 0x900, 0x4D, 1 },
	{ 0x98D, 0x76, 1 },
	{ 0x8D0, 0x23, 0 },
	{ 0x980, 0x4, 0 },
	{ 0xA630, 0x0, 1 },
};

/* IFPC & Preemption static powerup restore list */
static struct reg_list_pair {
	uint32_t offset;
	uint32_t val;
} a6xx_pwrup_reglist[] = {
	{ A6XX_VSC_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_GRAS_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_RB_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_PC_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_HLSQ_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_VFD_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_VPC_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_UCHE_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_SP_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_TPL1_ADDR_MODE_CNTL, 0x0 },
	{ A6XX_UCHE_WRITE_RANGE_MAX_LO, 0x0 },
	{ A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0 },
	{ A6XX_UCHE_TRAP_BASE_LO, 0x0 },
	{ A6XX_UCHE_TRAP_BASE_HI, 0x0 },
	{ A6XX_UCHE_WRITE_THRU_BASE_LO, 0x0 },
	{ A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0 },
	{ A6XX_UCHE_GMEM_RANGE_MIN_LO, 0x0 },
	{ A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0 },
	{ A6XX_UCHE_GMEM_RANGE_MAX_LO, 0x0 },
	{ A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0 },
	{ A6XX_UCHE_FILTER_CNTL, 0x0 },
	{ A6XX_UCHE_CACHE_WAYS, 0x0 },
	{ A6XX_UCHE_MODE_CNTL, 0x0 },
	{ A6XX_RB_NC_MODE_CNTL, 0x0 },
	{ A6XX_TPL1_NC_MODE_CNTL, 0x0 },
	{ A6XX_SP_NC_MODE_CNTL, 0x0 },
	{ A6XX_PC_DBG_ECO_CNTL, 0x0 },
	{ A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x0 },
};

/* IFPC only static powerup restore list */
static struct reg_list_pair a6xx_ifpc_pwrup_reglist[] = {
	{ A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x0 },
	{ A6XX_CP_CHICKEN_DBG, 0x0 },
	{ A6XX_CP_DBG_ECO_CNTL, 0x0 },
	{ A6XX_CP_PROTECT_CNTL, 0x0 },
	{ A6XX_CP_PROTECT_REG, 0x0 },
	{ A6XX_CP_PROTECT_REG+1, 0x0 },
	{ A6XX_CP_PROTECT_REG+2, 0x0 },
	{ A6XX_CP_PROTECT_REG+3, 0x0 },
	{ A6XX_CP_PROTECT_REG+4, 0x0 },
	{ A6XX_CP_PROTECT_REG+5, 0x0 },
	{ A6XX_CP_PROTECT_REG+6, 0x0 },
	{ A6XX_CP_PROTECT_REG+7, 0x0 },
	{ A6XX_CP_PROTECT_REG+8, 0x0 },
	{ A6XX_CP_PROTECT_REG+9, 0x0 },
	{ A6XX_CP_PROTECT_REG+10, 0x0 },
	{ A6XX_CP_PROTECT_REG+11, 0x0 },
	{ A6XX_CP_PROTECT_REG+12, 0x0 },
	{ A6XX_CP_PROTECT_REG+13, 0x0 },
	{ A6XX_CP_PROTECT_REG+14, 0x0 },
	{ A6XX_CP_PROTECT_REG+15, 0x0 },
	{ A6XX_CP_PROTECT_REG+16, 0x0 },
	{ A6XX_CP_PROTECT_REG+17, 0x0 },
	{ A6XX_CP_PROTECT_REG+18, 0x0 },
	{ A6XX_CP_PROTECT_REG+19, 0x0 },
	{ A6XX_CP_PROTECT_REG+20, 0x0 },
	{ A6XX_CP_PROTECT_REG+21, 0x0 },
	{ A6XX_CP_PROTECT_REG+22, 0x0 },
	{ A6XX_CP_PROTECT_REG+23, 0x0 },
	{ A6XX_CP_PROTECT_REG+24, 0x0 },
	{ A6XX_CP_PROTECT_REG+25, 0x0 },
	{ A6XX_CP_PROTECT_REG+26, 0x0 },
	{ A6XX_CP_PROTECT_REG+27, 0x0 },
	{ A6XX_CP_PROTECT_REG+28, 0x0 },
	{ A6XX_CP_PROTECT_REG+29, 0x0 },
	{ A6XX_CP_PROTECT_REG+30, 0x0 },
	{ A6XX_CP_PROTECT_REG+31, 0x0 },
	{ A6XX_CP_AHB_CNTL, 0x0 },
};

static struct reg_list_pair a615_pwrup_reglist[] = {
	{ A6XX_UCHE_GBIF_GX_CONFIG, 0x0 },
};

static void _update_always_on_regs(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int *const regs = gpudev->reg_offsets->offsets;

	regs[ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO] =
		A6XX_CP_ALWAYS_ON_COUNTER_LO;
	regs[ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI] =
		A6XX_CP_ALWAYS_ON_COUNTER_HI;
}

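/*
 * Sample the GMU always-on counter. The 64-bit counter is exposed as two
 * 32-bit registers, so the high word is read before and after the low
 * word; if it changed in between (the low word rolled over), the low word
 * is re-read and paired with the newer high word.
 */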
static uint64_t read_AO_counter(struct kgsl_device *device)
{
	unsigned int l, h, h1;

	kgsl_gmu_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h);
	kgsl_gmu_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l);
	kgsl_gmu_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h1);

	if (h == h1)
		return (uint64_t) l | ((uint64_t) h << 32);

	kgsl_gmu_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l);
	return (uint64_t) l | ((uint64_t) h1 << 32);
}

static void a6xx_pwrup_reglist_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
		PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
		"powerup_register_list")) {
		adreno_dev->pwrup_reglist.gpuaddr = 0;
		return;
	}

	kgsl_sharedmem_set(device, &adreno_dev->pwrup_reglist, 0, 0,
		PAGE_SIZE);
}

static void a6xx_init(struct adreno_device *adreno_dev)
{
	a6xx_crashdump_init(adreno_dev);

	/*
	 * If the GMU is not enabled, rewrite the offset for the always on
	 * counters to point to the CP always on instead of GMU always on
	 */
	if (!kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
		_update_always_on_regs(adreno_dev);

	a6xx_pwrup_reglist_init(adreno_dev);
}

/**
 * a6xx_protect_init() - Initializes register protection on a6xx
 * @adreno_dev: Pointer to the adreno device
 * Performs register writes to enable protected access to sensitive
 * registers
 */
static void a6xx_protect_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_protected_registers *mmu_prot =
		kgsl_mmu_get_prot_regs(&device->mmu);
	int i, num_sets;
	int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
	int max_sets = adreno_dev->gpucore->num_protected_regs;
	unsigned int mmu_base = 0, mmu_range = 0, cur_range;

	/* enable access protection to privileged registers */
	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000003);

	if (mmu_prot) {
		mmu_base = mmu_prot->base;
		mmu_range = mmu_prot->range;
		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
	}

	if (req_sets > max_sets)
		WARN(1, "Size exceeds the num of protection regs available\n");

	/* Protect GPU registers */
	num_sets = min_t(unsigned int,
		ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
	for (i = 0; i < num_sets; i++) {
		struct a6xx_protected_regs *regs =
			&a6xx_protected_regs_group[i];

		kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
			regs->base | (regs->count << 18) |
			(regs->read_protect << 31));
	}

	/* Protect MMU registers */
	if (mmu_prot) {
		while ((i < max_sets) && (mmu_range > 0)) {
			cur_range = min_t(unsigned int, mmu_range,
				0x2000);
			kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
				mmu_base | ((cur_range - 1) << 18) | (1 << 31));

			mmu_base += cur_range;
			mmu_range -= cur_range;
			i++;
		}
	}
}

static void a6xx_enable_64bit(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_regwrite(device, A6XX_CP_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VSC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_GRAS_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_RB_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_PC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_HLSQ_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VFD_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_VPC_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_UCHE_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_SP_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_TPL1_ADDR_MODE_CNTL, 0x1);
	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}

static inline unsigned int
__get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev)
{
	if (adreno_is_a615(adreno_dev) || adreno_is_a616(adreno_dev))
		return 0x8AA8AA82;
	else
		return 0x8AA8AA02;
}

static inline unsigned int
__get_gmu_ao_cgc_mode_cntl(struct adreno_device *adreno_dev)
{
	if (adreno_is_a615(adreno_dev) || adreno_is_a616(adreno_dev))
		return 0x00000222;
	else
		return 0x00020202;
}

static inline unsigned int
__get_gmu_ao_cgc_delay_cntl(struct adreno_device *adreno_dev)
{
	if (adreno_is_a615(adreno_dev) || adreno_is_a616(adreno_dev))
		return 0x00000111;
	else
		return 0x00010111;
}

static inline unsigned int
__get_gmu_ao_cgc_hyst_cntl(struct adreno_device *adreno_dev)
{
	if (adreno_is_a615(adreno_dev) || adreno_is_a616(adreno_dev))
		return 0x00000555;
	else
		return 0x00005555;
}

static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct kgsl_hwcg_reg *regs;
	unsigned int value;
	int i, j;

	if (!test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag))
		on = false;

	if (kgsl_gmu_isenabled(device)) {
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
			on ? __get_gmu_ao_cgc_mode_cntl(adreno_dev) : 0);
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
			on ? __get_gmu_ao_cgc_delay_cntl(adreno_dev) : 0);
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
			on ? __get_gmu_ao_cgc_hyst_cntl(adreno_dev) : 0);
	}

	kgsl_regread(device, A6XX_RBBM_CLOCK_CNTL, &value);

	if (value == __get_rbbm_clock_cntl_on(adreno_dev) && on)
		return;

	if (value == 0 && !on)
		return;

	for (i = 0; i < ARRAY_SIZE(a6xx_hwcg_registers); i++) {
		if (a6xx_hwcg_registers[i].devfunc(adreno_dev))
			break;
	}

	if (i == ARRAY_SIZE(a6xx_hwcg_registers))
		return;

	regs = a6xx_hwcg_registers[i].regs;

	/* Disable SP clock before programming HWCG registers */
	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);

	for (j = 0; j < a6xx_hwcg_registers[i].count; j++)
		kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);

	/* Enable SP clock */
	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);

	/* enable top level HWCG */
	kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL,
		on ? __get_rbbm_clock_cntl_on(adreno_dev) : 0);
}

#define LM_DEFAULT_LIMIT 6000

static uint32_t lm_limit(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (adreno_dev->lm_limit)
		return adreno_dev->lm_limit;

	if (of_property_read_u32(device->pdev->dev.of_node, "qcom,lm-limit",
		&adreno_dev->lm_limit))
		adreno_dev->lm_limit = LM_DEFAULT_LIMIT;

	return adreno_dev->lm_limit;
}

static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
{
	uint32_t i;
	struct cpu_gpu_lock *lock;
	struct reg_list_pair *r;

	/* Set up the register values */
	for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_pwrup_reglist); i++) {
		r = &a6xx_ifpc_pwrup_reglist[i];
		kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
	}

	for (i = 0; i < ARRAY_SIZE(a6xx_pwrup_reglist); i++) {
		r = &a6xx_pwrup_reglist[i];
		kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
	}

	lock = (struct cpu_gpu_lock *) adreno_dev->pwrup_reglist.hostptr;
	lock->flag_ucode = 0;
	lock->flag_kmd = 0;
	lock->turn = 0;

	/*
	 * The overall register list is composed of
	 * 1. Static IFPC-only registers
	 * 2. Static IFPC + preemption registers
	 * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects)
	 *
	 * The CP views the second and third entries as one dynamic list
	 * starting from list_offset. Thus, list_length should be the sum
	 * of all three lists above (of which the third list will start off
	 * empty). And list_offset should be specified as the size in dwords
	 * of the static IFPC-only register list.
	 */
	lock->list_length = (sizeof(a6xx_ifpc_pwrup_reglist) +
			sizeof(a6xx_pwrup_reglist)) >> 2;
	lock->list_offset = sizeof(a6xx_ifpc_pwrup_reglist) >> 2;

	memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
		a6xx_ifpc_pwrup_reglist, sizeof(a6xx_ifpc_pwrup_reglist));
	memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
		+ sizeof(a6xx_ifpc_pwrup_reglist), a6xx_pwrup_reglist,
		sizeof(a6xx_pwrup_reglist));

	if (adreno_is_a615(adreno_dev) || adreno_is_a616(adreno_dev)) {
		for (i = 0; i < ARRAY_SIZE(a615_pwrup_reglist); i++) {
			r = &a615_pwrup_reglist[i];
			kgsl_regread(KGSL_DEVICE(adreno_dev),
				r->offset, &r->val);
		}

		memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
			+ sizeof(a6xx_ifpc_pwrup_reglist)
			+ sizeof(a6xx_pwrup_reglist), a615_pwrup_reglist,
			sizeof(a615_pwrup_reglist));

		lock->list_length += sizeof(a615_pwrup_reglist) >> 2;
	}
}

/*
 * a6xx_start() - Device start
 * @adreno_dev: Pointer to adreno device
 *
 * a6xx device start
 */
static void a6xx_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int bit, mal, mode, glbl_inv;
	unsigned int amsbc = 0;
	static bool patch_reglist;

	/* runtime adjust callbacks based on feature sets */
	if (!kgsl_gmu_isenabled(device))
		/* Legacy idle management if gmu is disabled */
		ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;
	/* enable hardware clockgating */
	a6xx_hwcg_set(adreno_dev, true);

	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM))
		adreno_dev->lm_threshold_count = A6XX_GMU_GENERAL_1;

	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
			ARRAY_SIZE(a6xx_vbif_platforms));

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW))
		kgsl_regwrite(device, A6XX_UCHE_GBIF_GX_CONFIG, 0x10200F9);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

	/*
	 * Set UCHE_WRITE_THRU_BASE to the UCHE_TRAP_BASE effectively
	 * disabling L2 bypass
	 */
	kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
	kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
	kgsl_regwrite(device, A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
	kgsl_regwrite(device, A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);

	/* Program the GMEM VA range for the UCHE path */
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_LO,
			ADRENO_UCHE_GMEM_BASE);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_LO,
			ADRENO_UCHE_GMEM_BASE +
			adreno_dev->gmem_size - 1);
	kgsl_regwrite(device, A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);

	kgsl_regwrite(device, A6XX_UCHE_FILTER_CNTL, 0x804);
	kgsl_regwrite(device, A6XX_UCHE_CACHE_WAYS, 0x4);

	kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x010000C0);
	kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);

	/* Setting the mem pool size */
	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 128);

	/* Setting the primFifo thresholds default values */
	kgsl_regwrite(device, A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));

	/* Set the AHB default slave response to "ERROR" */
	kgsl_regwrite(device, A6XX_CP_AHB_CNTL, 0x1);

	/* Turn on performance counters */
	kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1);

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,highest-bank-bit", &bit))
		bit = MIN_HBB;

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,min-access-length", &mal))
		mal = 32;

	if (of_property_read_u32(device->pdev->dev.of_node,
		"qcom,ubwc-mode", &mode))
		mode = 0;

	switch (mode) {
	case KGSL_UBWC_1_0:
		mode = 1;
		break;
	case KGSL_UBWC_2_0:
		mode = 0;
		break;
	case KGSL_UBWC_3_0:
		mode = 0;
		amsbc = 1; /* Only valid for A640 and A680 */
		break;
	default:
		break;
	}

	if (bit >= 13 && bit <= 16)
		bit = (bit - 13) & 0x03;
	else
		bit = 0;

	mal = (mal == 64) ? 1 : 0;

	/* (1 << 29) globalInvFlushFilterDis bit needs to be set for A630 V1 */
	glbl_inv = (adreno_is_a630v1(adreno_dev)) ? 1 : 0;

	kgsl_regwrite(device, A6XX_RB_NC_MODE_CNTL, (amsbc << 4) | (mal << 3) |
			(bit << 1) | mode);
	kgsl_regwrite(device, A6XX_TPL1_NC_MODE_CNTL, (mal << 3) |
			(bit << 1) | mode);
	kgsl_regwrite(device, A6XX_SP_NC_MODE_CNTL, (mal << 3) | (bit << 1) |
			mode);

	kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (glbl_inv << 29) |
			(mal << 23) | (bit << 21));

	/* Set hang detection threshold to 0x1FFFFF * 16 cycles */
	kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
			(1 << 30) | 0x1fffff);

	kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);

	/* Set TWOPASSUSEWFI in A6XX_PC_DBG_ECO_CNTL if requested */
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI))
		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

	/* Enable the GMEM save/restore feature for preemption */
	if (adreno_is_preemption_enabled(adreno_dev))
		kgsl_regwrite(device, A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
			0x1);

	a6xx_protect_init(adreno_dev);

	if (!patch_reglist && (adreno_dev->pwrup_reglist.gpuaddr != 0)) {
		a6xx_patch_pwrup_reglist(adreno_dev);
		patch_reglist = true;
	}

	a6xx_preemption_start(adreno_dev);

	/*
	 * We start LM here because we want all the following to be up
	 * 1. GX HS
	 * 2. SPTPRAC
	 * 3. HFI
	 * At this point, we are guaranteed all.
	 */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
		test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
		int result;
		struct gmu_device *gmu = &device->gmu;
		struct device *dev = &gmu->pdev->dev;

		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD,
			GPU_LIMIT_THRESHOLD_ENABLE | lm_limit(adreno_dev));
		kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL, 0x1);

		gmu->lm_config.lm_type = 1;
		gmu->lm_config.lm_sensor_type = 1;
		gmu->lm_config.throttle_config = 1;
		gmu->lm_config.idle_throttle_en = 0;
		gmu->lm_config.acd_en = 0;
		gmu->bcl_config = 0;
		gmu->lm_dcvs_level = 0;

		result = hfi_send_lmconfig(gmu);
		if (result)
			dev_err(dev, "Failure enabling limits management (%d)\n",
				result);
	}
}

/*
 * a6xx_microcode_load() - Load microcode
 * @adreno_dev: Pointer to adreno device
 */
static int a6xx_microcode_load(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_firmware *fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
	uint64_t gpuaddr;
	void *zap;
	int ret = 0, zap_retry = 0;

	gpuaddr = fw->memdesc.gpuaddr;
	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_LO,
			lower_32_bits(gpuaddr));
	kgsl_regwrite(device, A6XX_CP_SQE_INSTR_BASE_HI,
			upper_32_bits(gpuaddr));

	/*
	 * Do not attempt to load the zap shader if the MMU does
	 * not support secure mode.
	 */
	if (!device->mmu.secured)
		return 0;

	/* Load the zap shader firmware through PIL if it's available */
	if (adreno_dev->gpucore->zap_name && !adreno_dev->zap_loaded) {
		/*
		 * subsystem_get() may return -EAGAIN in case system is busy
		 * and unable to load the firmware. So keep trying since this
		 * is not a fatal error.
		 */
		do {
			ret = 0;
			zap = subsystem_get(adreno_dev->gpucore->zap_name);

			/* Return error if the zap shader cannot be loaded */
			if (IS_ERR_OR_NULL(zap)) {
				ret = (zap == NULL) ? -ENODEV : PTR_ERR(zap);
				zap = NULL;
			} else
				adreno_dev->zap_loaded = 1;
		} while ((ret == -EAGAIN) && (zap_retry++ < ZAP_RETRY_MAX));
	}

	return ret;
}


/*
 * CP_INIT_MAX_CONTEXT bit tells if multiple hardware contexts can
 * be used at once or if they should be serialized
 */
#define CP_INIT_MAX_CONTEXT BIT(0)

/* Enables register protection mode */
#define CP_INIT_ERROR_DETECTION_CONTROL BIT(1)

/* Header dump information */
#define CP_INIT_HEADER_DUMP BIT(2) /* Reserved */

/* Default Reset states enabled for PFP and ME */
#define CP_INIT_DEFAULT_RESET_STATE BIT(3)

/* Drawcall filter range */
#define CP_INIT_DRAWCALL_FILTER_RANGE BIT(4)

/* Ucode workaround masks */
#define CP_INIT_UCODE_WORKAROUND_MASK BIT(5)

/*
 * Operation mode mask
 *
 * This ordinal provides the option to disable the
 * save/restore of performance counters across preemption.
 */
#define CP_INIT_OPERATION_MODE_MASK BIT(6)

/* Register initialization list */
#define CP_INIT_REGISTER_INIT_LIST BIT(7)

/* Register initialization list with spinlock */
#define CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK BIT(8)

#define CP_INIT_MASK (CP_INIT_MAX_CONTEXT | \
		CP_INIT_ERROR_DETECTION_CONTROL | \
		CP_INIT_HEADER_DUMP | \
		CP_INIT_DEFAULT_RESET_STATE | \
		CP_INIT_UCODE_WORKAROUND_MASK | \
		CP_INIT_OPERATION_MODE_MASK | \
		CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK)

static void _set_ordinals(struct adreno_device *adreno_dev,
		unsigned int *cmds, unsigned int count)
{
	unsigned int *start = cmds;

	/* Enabled ordinal mask */
	*cmds++ = CP_INIT_MASK;

	if (CP_INIT_MASK & CP_INIT_MAX_CONTEXT)
		*cmds++ = 0x00000003;

	if (CP_INIT_MASK & CP_INIT_ERROR_DETECTION_CONTROL)
		*cmds++ = 0x20000000;

	if (CP_INIT_MASK & CP_INIT_HEADER_DUMP) {
		/* Header dump address */
		*cmds++ = 0x00000000;
		/* Header dump enable and dump size */
		*cmds++ = 0x00000000;
	}

	if (CP_INIT_MASK & CP_INIT_DRAWCALL_FILTER_RANGE) {
		/* Start range */
		*cmds++ = 0x00000000;
		/* End range (inclusive) */
		*cmds++ = 0x00000000;
	}

	if (CP_INIT_MASK & CP_INIT_UCODE_WORKAROUND_MASK)
		*cmds++ = 0x00000000;

	if (CP_INIT_MASK & CP_INIT_OPERATION_MODE_MASK)
		*cmds++ = 0x00000002;

	if (CP_INIT_MASK & CP_INIT_REGISTER_INIT_LIST_WITH_SPINLOCK) {
		uint64_t gpuaddr = adreno_dev->pwrup_reglist.gpuaddr;

		*cmds++ = lower_32_bits(gpuaddr);
		*cmds++ = upper_32_bits(gpuaddr);
		*cmds++ = 0;

	} else if (CP_INIT_MASK & CP_INIT_REGISTER_INIT_LIST) {
		uint64_t gpuaddr = adreno_dev->pwrup_reglist.gpuaddr;

		*cmds++ = lower_32_bits(gpuaddr);
		*cmds++ = upper_32_bits(gpuaddr);
		/* Size is in dwords */
		*cmds++ = (sizeof(a6xx_ifpc_pwrup_reglist) +
			sizeof(a6xx_pwrup_reglist)) >> 2;
	}

	/* Pad rest of the cmds with 0's */
	while ((unsigned int)(cmds - start) < count)
		*cmds++ = 0x0;
}

/*
 * a6xx_send_cp_init() - Initialize ringbuffer
 * @adreno_dev: Pointer to adreno device
 * @rb: Pointer to the ringbuffer of device
 *
 * Submit commands for ME initialization.
 */
static int a6xx_send_cp_init(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int *cmds;
	int ret;

	cmds = adreno_ringbuffer_allocspace(rb, 12);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);

	*cmds++ = cp_type7_packet(CP_ME_INIT, 11);

	_set_ordinals(adreno_dev, cmds, 11);

	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
	if (ret) {
		adreno_spin_idle_debug(adreno_dev,
			"CP initialization failed to idle\n");

		if (!adreno_is_a3xx(adreno_dev))
			kgsl_sharedmem_writel(device, &device->scratch,
				SCRATCH_RPTR_OFFSET(rb->id), 0);
		rb->wptr = 0;
		rb->_wptr = 0;
	}

	return ret;
}

/*
 * Follow the ME_INIT sequence with a preemption yield to allow the GPU to move
 * to a different ringbuffer, if desired
 */
static int _preemption_init(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb, unsigned int *cmds,
		struct kgsl_context *context)
{
	unsigned int *cmds_orig = cmds;

	/* Turn CP protection OFF */
	*cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
	*cmds++ = 0;

	*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 6);
	*cmds++ = 1;
	cmds += cp_gpuaddr(adreno_dev, cmds,
			rb->preemption_desc.gpuaddr);

	*cmds++ = 2;
	cmds += cp_gpuaddr(adreno_dev, cmds,
			rb->secure_preemption_desc.gpuaddr);

	/* Turn CP protection ON */
	*cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
	*cmds++ = 1;

	*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
	cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
	*cmds++ = 0;
	/* generate interrupt on preemption completion */
	*cmds++ = 0;

	return cmds - cmds_orig;
}

static int a6xx_post_start(struct adreno_device *adreno_dev)
{
	int ret;
	unsigned int *cmds, *start;
	struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!adreno_is_preemption_enabled(adreno_dev))
		return 0;

	cmds = adreno_ringbuffer_allocspace(rb, 42);
	if (IS_ERR(cmds)) {
		KGSL_DRV_ERR(device, "error allocating preemption init cmds");
		return PTR_ERR(cmds);
	}
	start = cmds;

	cmds += _preemption_init(adreno_dev, rb, cmds, NULL);

	rb->_wptr = rb->_wptr - (42 - (cmds - start));

	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
	if (ret)
		adreno_spin_idle_debug(adreno_dev,
			"hw preemption initialization failed to idle\n");

	return ret;
}

/*
 * a6xx_rb_start() - Start the ringbuffer
 * @adreno_dev: Pointer to adreno device
 * @start_type: Warm or cold start
 */
static int a6xx_rb_start(struct adreno_device *adreno_dev,
		unsigned int start_type)
{
	struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
	struct kgsl_device *device = &adreno_dev->dev;
	uint64_t addr;
	int ret;

	addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);

	adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
			ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2).
	 */
	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
			A6XX_CP_RB_CNTL_DEFAULT);

	adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
			ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);

	ret = a6xx_microcode_load(adreno_dev);
	if (ret)
		return ret;

	/* Clear the SQE_HALT to start the CP engine */
	kgsl_regwrite(device, A6XX_CP_SQE_CNTL, 1);

	ret = a6xx_send_cp_init(adreno_dev, rb);
	if (ret)
		return ret;

	/* GPU comes up in secured mode, make it unsecured by default */
	ret = adreno_set_unsecured_mode(adreno_dev, rb);
	if (ret)
		return ret;

	return a6xx_post_start(adreno_dev);
}

unsigned int a6xx_set_marker(
		unsigned int *cmds, enum adreno_cp_marker_type type)
{
	unsigned int cmd = 0;

	*cmds++ = cp_type7_packet(CP_SET_MARKER, 1);

	/*
	 * Indicate the beginning and end of the IB1 list with a SET_MARKER.
	 * Among other things, this will implicitly enable and disable
	 * preemption respectively. IFPC can also be disabled and enabled
	 * with a SET_MARKER. Bit 8 tells the CP the marker is for IFPC.
	 */
	switch (type) {
	case IFPC_DISABLE:
		cmd = 0x101;
		break;
	case IFPC_ENABLE:
		cmd = 0x100;
		break;
	case IB1LIST_START:
		cmd = 0xD;
		break;
	case IB1LIST_END:
		cmd = 0xE;
		break;
	}

	*cmds++ = cmd;
	return 2;
}

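/*
 * Note: as used below, the first dword of the firmware image is not
 * copied into GPU memory; the remainder is loaded as the ucode payload
 * and the first dword of that payload is treated as the version.
 */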
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
		struct adreno_firmware *firmware)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
				fwfile, ret);
		return ret;
	}

	ret = kgsl_allocate_global(device, &firmware->memdesc, fw->size - 4,
			KGSL_MEMFLAGS_GPUREADONLY, 0, "ucode");

	if (!ret) {
		memcpy(firmware->memdesc.hostptr, &fw->data[4], fw->size - 4);
		firmware->size = (fw->size - 4) / sizeof(uint32_t);
		firmware->version = *(unsigned int *)&fw->data[4];
	}

	release_firmware(fw);
	return ret;
}

#define RSC_CMD_OFFSET 2
#define PDC_CMD_OFFSET 4

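/*
 * Minimal write helper for the PDC register space: a bare __raw_writel()
 * at (regbase + offsetwords * 4), used because this block is mapped
 * separately (gmu->pdc_reg_virt) from the normal GPU register space.
 */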
static void _regwrite(void __iomem *regbase,
		unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;

	reg = regbase + (offsetwords << 2);
	__raw_writel(value, reg);
}

/*
 * _load_gmu_rpmh_ucode() - Load the ucode into the GPU PDC/RSC blocks
 * PDC and RSC execute GPU power on/off RPMh sequence
 * @device: Pointer to KGSL device
 */
static void _load_gmu_rpmh_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;

	/* Disable SDE clock gating */
	kgsl_gmu_regwrite(device, A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	kgsl_gmu_regwrite(device, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	kgsl_gmu_regwrite(device, A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	kgsl_gmu_regwrite(device,
			A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET, 0);
	kgsl_gmu_regwrite(device,
			A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET, 0);
	kgsl_gmu_regwrite(device,
			A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET * 2,
			0x80000000);
	kgsl_gmu_regwrite(device,
			A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET * 2,
			0);
	kgsl_gmu_regwrite(device, A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Enable timestamp event for v1 only */
	if (adreno_is_a630v1(adreno_dev))
		kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);

	/* Load RSC sequencer uCode for sleep and wakeup */
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0, 0xA7A506A0);
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xA1E6A6E7);
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xA2E081E1);
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xE9A982E2);
	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020E8A8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0, 0xFEBEA1E1);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 1, 0xA5A4A3A2);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 2, 0x8382A6E0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 3, 0xBCE3E284);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 4, 0x002081FC);

	/* Set TCS commands used by PDC sequence for low power modes */
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CONTROL, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS1_CMD0_DATA, 1);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET, 0x30000);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET, 0x0);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET * 2, 0x10108);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET * 2, 0x30080);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS3_CONTROL, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS3_CMD0_DATA, 2);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET, 0x30000);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET, 0x3);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET * 2, 0x10108);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET * 2, 0x30080);
	_regwrite(gmu->pdc_reg_virt,
			PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x3);

	/* Setup GPU PDC */
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_START_ADDR, 0);
	_regwrite(gmu->pdc_reg_virt, PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();
}

#define GMU_START_TIMEOUT 100 /* ms */
#define GPU_START_TIMEOUT 100 /* ms */
#define GPU_RESET_TIMEOUT 1 /* ms */
#define GPU_RESET_TIMEOUT_US 10 /* us */

Kyle Pieferb1027b02017-02-10 13:58:58 -08001296/*
1297 * timed_poll_check() - polling *gmu* register at given offset until
1298 * its value changed to match expected value. The function times
1299 * out and returns after given duration if register is not updated
1300 * as expected.
1301 *
1302 * @device: Pointer to KGSL device
1303 * @offset: Register offset
1304 * @expected_ret: expected register value that stops polling
1305 * @timout: number of jiffies to abort the polling
1306 * @mask: bitmask to filter register value to match expected_ret
1307 */
1308static int timed_poll_check(struct kgsl_device *device,
1309 unsigned int offset, unsigned int expected_ret,
1310 unsigned int timeout, unsigned int mask)
1311{
1312 unsigned long t;
1313 unsigned int value;
1314
1315 t = jiffies + msecs_to_jiffies(timeout);
1316
Kyle Pieferd9e09dc2017-05-19 16:34:43 -07001317 do {
Kyle Pieferb1027b02017-02-10 13:58:58 -08001318 kgsl_gmu_regread(device, offset, &value);
1319 if ((value & mask) == expected_ret)
1320 return 0;
George Shen56c9cdb2017-08-25 10:43:32 -07001321		/* Wait up to 100us to reduce unnecessary AHB bus traffic */
Oleg Perelet7f7f9f52017-10-31 10:02:45 -07001322 usleep_range(10, 100);
Kyle Pieferd9e09dc2017-05-19 16:34:43 -07001323 } while (!time_after(jiffies, t));
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001324
Carter Cooper1ee715a2017-09-07 16:08:38 -06001325 /* Double check one last time */
1326 kgsl_gmu_regread(device, offset, &value);
1327 if ((value & mask) == expected_ret)
1328 return 0;
1329
Kyle Pieferb1027b02017-02-10 13:58:58 -08001330 return -EINVAL;
1331}
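/*
 * Illustrative usage only (this mirrors a6xx_gmu_start() below): wait up
 * to GMU_START_TIMEOUT ms for the GMU FW init handshake value, comparing
 * all 32 bits of the register:
 *
 *	if (timed_poll_check(device, A6XX_GMU_CM3_FW_INIT_RESULT,
 *			0xBABEFACE, GMU_START_TIMEOUT, 0xFFFFFFFF))
 *		return -ETIMEDOUT;
 */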
1332
1333/*
Kyle Piefer4edfb6b2017-08-03 16:42:09 -07001334 * The lowest 16 bits of this value are the number of XO clock cycles
1335 * for main hysteresis. This is the first hysteresis. Here we set it
Kyle Pieferbfed9162017-10-13 13:29:00 -07001336 * to 0x1680 cycles, or 300 us. The highest 16 bits of this value are
Kyle Piefer4edfb6b2017-08-03 16:42:09 -07001337 * the number of XO clock cycles for short hysteresis. This happens
1338 * after main hysteresis. Here we set it to 0xA cycles, or 0.5 us.
1339 */
Kyle Pieferbfed9162017-10-13 13:29:00 -07001340#define GMU_PWR_COL_HYST 0x000A1680
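/*
 * Worked example, assuming the typical 19.2 MHz always-on XO clock:
 * main hysteresis 0x1680 = 5760 cycles -> 5760 / 19.2 MHz = 300 us,
 * short hysteresis 0xA = 10 cycles -> 10 / 19.2 MHz ~= 0.5 us.
 */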
Kyle Piefer4edfb6b2017-08-03 16:42:09 -07001341
1342/*
Kyle Pieferb1027b02017-02-10 13:58:58 -08001343 * a6xx_gmu_power_config() - Configure and enable GMU's low power mode
1344 * setting based on ADRENO feature flags.
1345 * @device: Pointer to KGSL device
1346 */
1347static void a6xx_gmu_power_config(struct kgsl_device *device)
1348{
1349 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1350 struct gmu_device *gmu = &device->gmu;
1351
Kyle Pieferd3964162017-04-06 15:44:03 -07001352 /* Configure registers for idle setting. The setting is cumulative */
George Shenc4c74262017-05-11 15:37:34 -07001353
George Shen1f312ab2017-08-01 10:53:50 -07001354 /* Disable GMU WB/RB buffer */
1355 kgsl_gmu_regwrite(device, A6XX_GMU_SYS_BUS_CONFIG, 0x1);
1356
George Shenc4c74262017-05-11 15:37:34 -07001357 kgsl_gmu_regwrite(device,
1358 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9C40400);
1359
Kyle Pieferd3964162017-04-06 15:44:03 -07001360 switch (gmu->idle_level) {
1361 case GPU_HW_MIN_VOLT:
Kyle Pieferdc0706c2017-04-13 13:17:50 -07001362 kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
1363 MIN_BW_ENABLE_MASK);
1364 kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_HYST_CTRL, 0,
1365 MIN_BW_HYST);
Kyle Pieferd3964162017-04-06 15:44:03 -07001366 /* fall through */
1367 case GPU_HW_NAP:
Kyle Pieferdc0706c2017-04-13 13:17:50 -07001368 kgsl_gmu_regrmw(device, A6XX_GMU_GPU_NAP_CTRL, 0,
1369 HW_NAP_ENABLE_MASK);
Kyle Pieferd3964162017-04-06 15:44:03 -07001370 /* fall through */
1371 case GPU_HW_IFPC:
1372 kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
Kyle Piefer4edfb6b2017-08-03 16:42:09 -07001373 GMU_PWR_COL_HYST);
Kyle Pieferdc0706c2017-04-13 13:17:50 -07001374 kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
Kyle Pieferd3964162017-04-06 15:44:03 -07001375 IFPC_ENABLE_MASK);
1376 /* fall through */
1377 case GPU_HW_SPTP_PC:
1378 kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
Kyle Piefer4edfb6b2017-08-03 16:42:09 -07001379 GMU_PWR_COL_HYST);
Kyle Pieferdc0706c2017-04-13 13:17:50 -07001380 kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
Kyle Pieferd3964162017-04-06 15:44:03 -07001381 SPTP_ENABLE_MASK);
1382 /* fall through */
1383 default:
1384 break;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001385 }
1386
Kyle Piefer3a5ac092017-04-06 16:05:30 -07001387 /* ACD feature enablement */
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07001388 if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
1389 test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
Kyle Pieferdc0706c2017-04-13 13:17:50 -07001390 kgsl_gmu_regrmw(device, A6XX_GMU_BOOT_KMD_LM_HANDSHAKE, 0,
1391 BIT(10));
Kyle Piefer3a5ac092017-04-06 16:05:30 -07001392
Kyle Pieferb1027b02017-02-10 13:58:58 -08001393 /* Enable RPMh GPU client */
1394 if (ADRENO_FEATURE(adreno_dev, ADRENO_RPMH))
Kyle Pieferdc0706c2017-04-13 13:17:50 -07001395 kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
1396 RPMH_ENABLE_MASK);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001397}
1398
1399/*
1400 * a6xx_gmu_start() - Start GMU and wait until FW boots up.
1401 * @device: Pointer to KGSL device
1402 */
1403static int a6xx_gmu_start(struct kgsl_device *device)
1404{
1405 struct gmu_device *gmu = &device->gmu;
1406
Oleg Perelet5d2d28f2018-03-06 17:03:20 -08001407 kgsl_regwrite(device, A6XX_GMU_CX_GMU_WFI_CONFIG, 0x0);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001408 /* Write 1 first to make sure the GMU is reset */
1409 kgsl_gmu_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1);
1410
1411 /* Make sure putting in reset doesn't happen after clearing */
1412 wmb();
1413
1414 /* Bring GMU out of reset */
1415 kgsl_gmu_regwrite(device, A6XX_GMU_CM3_SYSRESET, 0);
1416 if (timed_poll_check(device,
1417 A6XX_GMU_CM3_FW_INIT_RESULT,
1418 0xBABEFACE,
1419 GMU_START_TIMEOUT,
1420 0xFFFFFFFF)) {
1421 dev_err(&gmu->pdev->dev, "GMU doesn't boot\n");
1422 return -ETIMEDOUT;
1423 }
1424
1425 return 0;
1426}
1427
1428/*
1429 * a6xx_gmu_hfi_start() - Write registers and start HFI.
1430 * @device: Pointer to KGSL device
1431 */
1432static int a6xx_gmu_hfi_start(struct kgsl_device *device)
1433{
1434 struct gmu_device *gmu = &device->gmu;
1435
Kyle Piefere7b06b42017-04-06 13:53:01 -07001436 kgsl_gmu_regrmw(device, A6XX_GMU_GMU2HOST_INTR_MASK,
1437 HFI_IRQ_MSGQ_MASK, 0);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001438 kgsl_gmu_regwrite(device, A6XX_GMU_HFI_CTRL_INIT, 1);
1439
1440 if (timed_poll_check(device,
1441 A6XX_GMU_HFI_CTRL_STATUS,
1442 BIT(0),
1443 GMU_START_TIMEOUT,
1444 BIT(0))) {
1445 dev_err(&gmu->pdev->dev, "GMU HFI init failed\n");
1446 return -ETIMEDOUT;
1447 }
1448
1449 return 0;
1450}
1451
1452/*
1453 * a6xx_oob_set() - Set OOB interrupt to GMU.
1454 * @adreno_dev: Pointer to adreno device
1455 * @set_mask: set_mask is a bitmask that defines a set of OOB
1456 * interrupts to trigger.
1457 * @check_mask: check_mask is a bitmask that provides a set of
1458 * OOB ACK bits. check_mask usually matches set_mask to
1459 * ensure OOBs are handled.
1460 * @clear_mask: After the GMU handles an OOB interrupt, the GMU driver
1461 * clears the interrupt. clear_mask is a bitmask that defines
1462 * a set of OOB interrupts to clear.
1463 */
1464static int a6xx_oob_set(struct adreno_device *adreno_dev,
1465 unsigned int set_mask, unsigned int check_mask,
1466 unsigned int clear_mask)
1467{
1468 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001469 int ret = 0;
1470
George Shena458dd92018-01-03 14:20:34 -08001471 if (!kgsl_gmu_isenabled(device) || !clear_mask)
Kyle Pieferc75922e2017-05-18 15:05:07 -07001472 return 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001473
1474 kgsl_gmu_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, set_mask);
1475
1476 if (timed_poll_check(device,
1477 A6XX_GMU_GMU2HOST_INTR_INFO,
1478 check_mask,
1479 GPU_START_TIMEOUT,
1480 check_mask)) {
1481 ret = -ETIMEDOUT;
George Shen7201a6d2017-11-03 10:39:36 -07001482 WARN(1, "OOB set timed out, mask %x\n", set_mask);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001483 }
1484
1485 kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, clear_mask);
1486
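	/*
	 * Record the request as outstanding: fls(clear_mask) - 1 is the index
	 * of the highest bit set in clear_mask (e.g. BIT(30) records bit 30),
	 * so a6xx_oob_clear() only clears OOBs that were actually set here.
	 */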
George Shena458dd92018-01-03 14:20:34 -08001487 set_bit((fls(clear_mask) - 1), &a6xx_oob_state_bitmask);
1488
Kyle Pieferb1027b02017-02-10 13:58:58 -08001489 trace_kgsl_gmu_oob_set(set_mask);
1490 return ret;
1491}
1492
1493/*
1494 * a6xx_oob_clear() - Clear a previously set OOB request.
1495 * @adreno_dev: Pointer to the adreno device that has the GMU
1496 * @clear_mask: Bitmask that provides the OOB bits to clear
1497 */
1498static inline void a6xx_oob_clear(struct adreno_device *adreno_dev,
1499 unsigned int clear_mask)
1500{
1501 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1502
George Shena458dd92018-01-03 14:20:34 -08001503 if (!kgsl_gmu_isenabled(device) || !clear_mask)
Kyle Pieferb1027b02017-02-10 13:58:58 -08001504 return;
1505
George Shena458dd92018-01-03 14:20:34 -08001506 if (test_and_clear_bit(fls(clear_mask) - 1,
1507 &a6xx_oob_state_bitmask))
1508 kgsl_gmu_regwrite(device,
1509 A6XX_GMU_HOST2GMU_INTR_SET,
1510 clear_mask);
1511
Kyle Pieferb1027b02017-02-10 13:58:58 -08001512 trace_kgsl_gmu_oob_clear(clear_mask);
1513}
1514
Carter Cooperdf7ba702017-03-20 11:28:04 -06001515/*
1516 * a6xx_gpu_keepalive() - GMU reg write to request GPU stays on
1517 * @adreno_dev: Pointer to the adreno device that has the GMU
1518 * @state: State to set: true is ON, false is OFF
1519 */
1520static inline void a6xx_gpu_keepalive(struct adreno_device *adreno_dev,
1521 bool state)
1522{
1523 adreno_write_gmureg(adreno_dev,
1524 ADRENO_REG_GMU_PWR_COL_KEEPALIVE, state);
1525}
1526
Kyle Pieferb1027b02017-02-10 13:58:58 -08001527#define SPTPRAC_POWERON_CTRL_MASK 0x00778000
1528#define SPTPRAC_POWEROFF_CTRL_MASK 0x00778001
1529#define SPTPRAC_POWEROFF_STATUS_MASK BIT(2)
1530#define SPTPRAC_POWERON_STATUS_MASK BIT(3)
1531#define SPTPRAC_CTRL_TIMEOUT 10 /* ms */
Kyle Pieferfa50d3e2017-05-24 12:35:24 -07001532#define A6XX_RETAIN_FF_ENABLE_ENABLE_MASK BIT(11)
Kyle Pieferb1027b02017-02-10 13:58:58 -08001533
1534/*
1535 * a6xx_sptprac_enable() - Power on SPTPRAC
1536 * @adreno_dev: Pointer to Adreno device
1537 */
1538static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
1539{
1540 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1541 struct gmu_device *gmu = &device->gmu;
1542
Kyle Piefer51dc0142017-04-14 12:32:49 -07001543 if (!gmu->pdev)
Kyle Pieferb1027b02017-02-10 13:58:58 -08001544 return -EINVAL;
1545
1546 kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
1547 SPTPRAC_POWERON_CTRL_MASK);
1548
1549 if (timed_poll_check(device,
1550 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS,
1551 SPTPRAC_POWERON_STATUS_MASK,
1552 SPTPRAC_CTRL_TIMEOUT,
1553 SPTPRAC_POWERON_STATUS_MASK)) {
1554 dev_err(&gmu->pdev->dev, "power on SPTPRAC fail\n");
1555 return -EINVAL;
1556 }
1557
1558 return 0;
1559}
1560
1561/*
1562 * a6xx_sptprac_disable() - Power off SPTPRAC
1563 * @adreno_dev: Pointer to Adreno device
1564 */
1565static void a6xx_sptprac_disable(struct adreno_device *adreno_dev)
1566{
1567 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1568 struct gmu_device *gmu = &device->gmu;
1569
Kyle Piefer51dc0142017-04-14 12:32:49 -07001570 if (!gmu->pdev)
Kyle Pieferb1027b02017-02-10 13:58:58 -08001571 return;
1572
Kyle Pieferfa50d3e2017-05-24 12:35:24 -07001573 /* Ensure that retention is on */
1574 kgsl_gmu_regrmw(device, A6XX_GPU_CC_GX_GDSCR, 0,
1575 A6XX_RETAIN_FF_ENABLE_ENABLE_MASK);
1576
Kyle Pieferb1027b02017-02-10 13:58:58 -08001577 kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
1578 SPTPRAC_POWEROFF_CTRL_MASK);
1579
1580 if (timed_poll_check(device,
1581 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS,
1582 SPTPRAC_POWEROFF_STATUS_MASK,
1583 SPTPRAC_CTRL_TIMEOUT,
1584 SPTPRAC_POWEROFF_STATUS_MASK))
1585 dev_err(&gmu->pdev->dev, "power off SPTPRAC fail\n");
1586}
1587
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001588#define SPTPRAC_POWER_OFF BIT(2)
1589#define SP_CLK_OFF BIT(4)
1590#define GX_GDSC_POWER_OFF BIT(6)
1591#define GX_CLK_OFF BIT(7)
Oleg Perelet39fead22018-01-08 14:46:17 -08001592#define is_on(val) (!(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF)))
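/* GX is considered on only when both the GX GDSC and GX clock status bits read 0 */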
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001593/*
1594 * a6xx_gx_is_on() - Check if GX is on using pwr status register
1595 * @adreno_dev - Pointer to adreno_device
1596 * This check should only be performed if the keepalive bit is set or it
1597 * can be guaranteed that the power state of the GPU will remain unchanged
1598 */
1599static bool a6xx_gx_is_on(struct adreno_device *adreno_dev)
1600{
1601 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1602 unsigned int val;
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001603
1604 if (!kgsl_gmu_isenabled(device))
1605 return true;
1606
1607 kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
Oleg Perelet39fead22018-01-08 14:46:17 -08001608 return is_on(val);
Shrenuj Bansald197bf62017-04-07 11:00:09 -07001609}
1610
1611/*
1612 * a6xx_sptprac_is_on() - Check if SPTP is on using pwr status register
1613 * @adreno_dev - Pointer to adreno_device
1614 * This check should only be performed if the keepalive bit is set or it
1615 * can be guaranteed that the power state of the GPU will remain unchanged
1616 */
1617static bool a6xx_sptprac_is_on(struct adreno_device *adreno_dev)
1618{
1619 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1620 unsigned int val;
1621
1622 if (!kgsl_gmu_isenabled(device))
1623 return true;
1624
1625 kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
1626 return !(val & (SPTPRAC_POWER_OFF | SP_CLK_OFF));
1627}
1628
Kyle Pieferb1027b02017-02-10 13:58:58 -08001629/*
1630 * a6xx_gfx_rail_on() - request GMU to power GPU at given OPP.
1631 * @device: Pointer to KGSL device
1632 *
1633 */
1634static int a6xx_gfx_rail_on(struct kgsl_device *device)
1635{
1636 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1637 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
1638 struct gmu_device *gmu = &device->gmu;
1639 struct arc_vote_desc *default_opp;
1640 unsigned int perf_idx;
1641 int ret;
1642
1643 perf_idx = pwr->num_pwrlevels - pwr->default_pwrlevel - 1;
1644 default_opp = &gmu->rpmh_votes.gx_votes[perf_idx];
1645
1646 kgsl_gmu_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
1647 OOB_BOOT_OPTION);
1648 kgsl_gmu_regwrite(device, A6XX_GMU_GX_VOTE_IDX, default_opp->pri_idx);
1649 kgsl_gmu_regwrite(device, A6XX_GMU_MX_VOTE_IDX, default_opp->sec_idx);
1650
1651 ret = a6xx_oob_set(adreno_dev, OOB_BOOT_SLUMBER_SET_MASK,
1652 OOB_BOOT_SLUMBER_CHECK_MASK,
1653 OOB_BOOT_SLUMBER_CLEAR_MASK);
1654
1655 if (ret)
Kyle Piefer247e35c2017-06-08 11:13:11 -07001656 dev_err(&gmu->pdev->dev, "Boot OOB timed out\n");
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001657
1658 return ret;
1659}
1660
Kyle Piefere923b7a2017-03-28 17:31:48 -07001661#define GMU_POWER_STATE_SLUMBER 15
1662
Kyle Pieferb1027b02017-02-10 13:58:58 -08001663/*
1664 * a6xx_notify_slumber() - initiate request to GMU to prepare to slumber
1665 * @device: Pointer to KGSL device
1666 */
1667static int a6xx_notify_slumber(struct kgsl_device *device)
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001668{
Kyle Pieferb1027b02017-02-10 13:58:58 -08001669 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1670 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
1671 struct gmu_device *gmu = &device->gmu;
1672 int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
1673 int perf_idx = gmu->num_gpupwrlevels - pwr->default_pwrlevel - 1;
1674 int ret, state;
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001675
Kyle Piefer247e35c2017-06-08 11:13:11 -07001676 /* Disable the power counter so that the GMU is not busy */
1677 kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
1678
Kyle Pieferf53c1872017-09-11 14:16:43 -07001679 /* Turn off SPTPRAC if we own it */
1680 if (gmu->idle_level < GPU_HW_SPTP_PC)
1681 a6xx_sptprac_disable(adreno_dev);
Kyle Piefer68178ef2017-06-19 16:46:13 -07001682
Kyle Pieferb1027b02017-02-10 13:58:58 -08001683 if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
1684 ret = hfi_notify_slumber(gmu, perf_idx, bus_level);
Kyle Pieferda0fa542017-08-04 13:39:40 -07001685 goto out;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001686 }
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001687
Kyle Pieferb1027b02017-02-10 13:58:58 -08001688 kgsl_gmu_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
1689 OOB_SLUMBER_OPTION);
Sharat Masetty928bc1d2017-11-13 15:46:55 +05301690 kgsl_gmu_regwrite(device, A6XX_GMU_GX_VOTE_IDX, perf_idx);
1691 kgsl_gmu_regwrite(device, A6XX_GMU_MX_VOTE_IDX, bus_level);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001692
1693 ret = a6xx_oob_set(adreno_dev, OOB_BOOT_SLUMBER_SET_MASK,
1694 OOB_BOOT_SLUMBER_CHECK_MASK,
1695 OOB_BOOT_SLUMBER_CLEAR_MASK);
1696 a6xx_oob_clear(adreno_dev, OOB_BOOT_SLUMBER_CLEAR_MASK);
1697
1698 if (ret)
Kyle Piefer247e35c2017-06-08 11:13:11 -07001699 dev_err(&gmu->pdev->dev, "Notify slumber OOB timed out\n");
Kyle Pieferb1027b02017-02-10 13:58:58 -08001700 else {
George Shenf2d4e052017-05-11 16:28:23 -07001701 kgsl_gmu_regread(device,
1702 A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &state);
Oleg Perelet62d5cec2017-03-27 16:14:52 -07001703 if (state != GPU_HW_SLUMBER) {
Kyle Pieferb1027b02017-02-10 13:58:58 -08001704 dev_err(&gmu->pdev->dev,
Kyle Pieferc96ad952017-05-02 13:35:45 -07001705 "Failed to prepare for slumber: 0x%x\n",
1706 state);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001707 ret = -EINVAL;
1708 }
1709 }
1710
Kyle Pieferda0fa542017-08-04 13:39:40 -07001711out:
1712 /* Make sure the fence is in ALLOW mode */
1713 kgsl_gmu_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001714 return ret;
1715}
1716
1717static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
1718{
1719 struct gmu_device *gmu = &device->gmu;
1720 struct device *dev = &gmu->pdev->dev;
George Shen6927d8f2017-07-19 11:38:10 -07001721 int val;
1722
Deepak Kumar0eb0a0c2018-04-24 14:11:53 +05301723 /* Only trigger wakeup sequence if sleep sequence was done earlier */
1724 if (!test_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags))
1725 return 0;
1726
George Shen6927d8f2017-07-19 11:38:10 -07001727 kgsl_gmu_regread(device, A6XX_GPU_CC_GX_DOMAIN_MISC, &val);
George Shen683841f2017-10-03 18:12:02 -07001728 if (!(val & 0x1))
1729 dev_err_ratelimited(&gmu->pdev->dev,
1730 "GMEM CLAMP IO not set while GFX rail off\n");
Kyle Pieferb1027b02017-02-10 13:58:58 -08001731
George Shencbb18e22017-05-11 16:04:13 -07001732 /* RSC wake sequence */
1733 kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
Kyle Pieferb1027b02017-02-10 13:58:58 -08001734
George Shencbb18e22017-05-11 16:04:13 -07001735 /* Write request before polling */
1736 wmb();
Kyle Pieferb1027b02017-02-10 13:58:58 -08001737
George Shencbb18e22017-05-11 16:04:13 -07001738 if (timed_poll_check(device,
1739 A6XX_GMU_RSCC_CONTROL_ACK,
1740 BIT(1),
1741 GPU_START_TIMEOUT,
1742 BIT(1))) {
1743 dev_err(dev, "Failed to do GPU RSC power on\n");
1744 return -EINVAL;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001745 }
1746
George Shencbb18e22017-05-11 16:04:13 -07001747 if (timed_poll_check(device,
1748 A6XX_RSCC_SEQ_BUSY_DRV0,
1749 0,
1750 GPU_START_TIMEOUT,
1751 0xFFFFFFFF))
1752 goto error_rsc;
1753
1754 kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
1755
Deepak Kumar0eb0a0c2018-04-24 14:11:53 +05301756 /* Clear sleep sequence flag as wakeup sequence is successful */
1757 clear_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags);
1758
Kyle Piefer247e35c2017-06-08 11:13:11 -07001759 /* Enable the power counter because it was disabled before slumber */
1760 kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
1761
Kyle Piefer68178ef2017-06-19 16:46:13 -07001762 return 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001763error_rsc:
1764 dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
Kyle Piefer68178ef2017-06-19 16:46:13 -07001765 return -EINVAL;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001766}
1767
1768static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
1769{
1770 struct gmu_device *gmu = &device->gmu;
Kyle Piefer3e1f6bc2017-08-10 11:16:19 -07001771 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1772 int ret;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001773
Deepak Kumar0eb0a0c2018-04-24 14:11:53 +05301774 if (test_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags))
1775 return 0;
1776
Kyle Piefer3e1f6bc2017-08-10 11:16:19 -07001777 /* RSC sleep sequence is different on v1 */
1778 if (adreno_is_a630v1(adreno_dev))
1779 kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
1780
Kyle Pieferb1027b02017-02-10 13:58:58 -08001781 kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 1);
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001782 wmb();
1783
Kyle Piefer3e1f6bc2017-08-10 11:16:19 -07001784 if (adreno_is_a630v1(adreno_dev))
1785 ret = timed_poll_check(device,
1786 A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0,
1787 BIT(0),
1788 GPU_START_TIMEOUT,
1789 BIT(0));
1790 else
1791 ret = timed_poll_check(device,
1792 A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
1793 BIT(16),
1794 GPU_START_TIMEOUT,
1795 BIT(16));
1796
1797 if (ret) {
Kyle Pieferb1027b02017-02-10 13:58:58 -08001798 dev_err(&gmu->pdev->dev, "GPU RSC power off fail\n");
Kyle Piefer3e1f6bc2017-08-10 11:16:19 -07001799 return -ETIMEDOUT;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001800 }
1801
Kyle Piefer3e1f6bc2017-08-10 11:16:19 -07001802 /* Read to clear the timestamp valid signal. Don't care what we read. */
1803 if (adreno_is_a630v1(adreno_dev)) {
1804 kgsl_gmu_regread(device,
1805 A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0,
1806 &ret);
1807 kgsl_gmu_regread(device,
1808 A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0,
1809 &ret);
1810 }
1811
Kyle Piefer9e0ac3c2017-05-01 16:34:14 -07001812 kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001813
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07001814 if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
Kyle Piefer3e1f6bc2017-08-10 11:16:19 -07001815 test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07001816 kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001817
Deepak Kumar0eb0a0c2018-04-24 14:11:53 +05301818 set_bit(GMU_RSCC_SLEEP_SEQ_DONE, &gmu->flags);
Kyle Piefer68178ef2017-06-19 16:46:13 -07001819 return 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001820}
1821
1822/*
1823 * a6xx_gmu_fw_start() - set up GMU and start FW
1824 * @device: Pointer to KGSL device
1825 * @boot_state: State of the GMU being started
1826 */
1827static int a6xx_gmu_fw_start(struct kgsl_device *device,
1828 unsigned int boot_state)
1829{
1830 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1831 struct gmu_device *gmu = &device->gmu;
1832 struct gmu_memdesc *mem_addr = gmu->hfi_mem;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001833 int ret, i;
George Shenf453d422017-08-19 21:12:11 -07001834 unsigned int chipid = 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001835
Kyle Piefere923b7a2017-03-28 17:31:48 -07001836 switch (boot_state) {
1837 case GMU_COLD_BOOT:
Kyle Pieferb1027b02017-02-10 13:58:58 -08001838 /* Turn on TCM retention */
1839 kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);
1840
Kyle Piefer68178ef2017-06-19 16:46:13 -07001841 if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags))
Kyle Pieferb1027b02017-02-10 13:58:58 -08001842 _load_gmu_rpmh_ucode(device);
Deepak Kumar0eb0a0c2018-04-24 14:11:53 +05301843 else {
George Shencbb18e22017-05-11 16:04:13 -07001844 ret = a6xx_rpmh_power_on_gpu(device);
1845 if (ret)
1846 return ret;
1847 }
Kyle Pieferb1027b02017-02-10 13:58:58 -08001848
1849 if (gmu->load_mode == TCM_BOOT) {
1850 /* Load GMU image via AHB bus */
1851 for (i = 0; i < MAX_GMUFW_SIZE; i++)
1852 kgsl_gmu_regwrite(device,
1853 A6XX_GMU_CM3_ITCM_START + i,
1854 *((uint32_t *) gmu->fw_image.
1855 hostptr + i));
1856
1857 /* Prevent leaving reset before the FW is written */
1858 wmb();
1859 } else {
1860 dev_err(&gmu->pdev->dev, "Incorrect GMU load mode %d\n",
1861 gmu->load_mode);
1862 return -EINVAL;
1863 }
Kyle Piefere923b7a2017-03-28 17:31:48 -07001864 break;
1865 case GMU_WARM_BOOT:
Kyle Pieferb1027b02017-02-10 13:58:58 -08001866 ret = a6xx_rpmh_power_on_gpu(device);
1867 if (ret)
1868 return ret;
Kyle Piefere923b7a2017-03-28 17:31:48 -07001869 break;
Kyle Piefere923b7a2017-03-28 17:31:48 -07001870 default:
1871 break;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001872 }
1873
1874	/* Clear init result to make sure we are getting a fresh value */
1875 kgsl_gmu_regwrite(device, A6XX_GMU_CM3_FW_INIT_RESULT, 0);
1876 kgsl_gmu_regwrite(device, A6XX_GMU_CM3_BOOT_CONFIG, gmu->load_mode);
1877
1878 kgsl_gmu_regwrite(device, A6XX_GMU_HFI_QTBL_ADDR,
1879 mem_addr->gmuaddr);
1880 kgsl_gmu_regwrite(device, A6XX_GMU_HFI_QTBL_INFO, 1);
1881
1882 kgsl_gmu_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0,
1883 FENCE_RANGE_MASK);
1884
George Shenf453d422017-08-19 21:12:11 -07001885 /* Pass chipid to GMU FW, must happen before starting GMU */
1886
1887 /* Keep Core and Major bitfields unchanged */
1888 chipid = adreno_dev->chipid & 0xFFFF0000;
1889
1890 /*
1891 * Compress minor and patch version into 8 bits
1892 * Bit 15-12: minor version
1893 * Bit 11-8: patch version
1894 */
1895 chipid = chipid | (ADRENO_CHIPID_MINOR(adreno_dev->chipid) << 12)
1896 | (ADRENO_CHIPID_PATCH(adreno_dev->chipid) << 8);
1897
1898 kgsl_gmu_regwrite(device, A6XX_GMU_HFI_SFR_ADDR, chipid);
1899
Kyle Pieferd3964162017-04-06 15:44:03 -07001900 /* Configure power control and bring the GMU out of reset */
1901 a6xx_gmu_power_config(device);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001902 ret = a6xx_gmu_start(device);
1903 if (ret)
1904 return ret;
1905
Kyle Piefere923b7a2017-03-28 17:31:48 -07001906 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
Kyle Pieferb1027b02017-02-10 13:58:58 -08001907 ret = a6xx_gfx_rail_on(device);
1908 if (ret) {
1909 a6xx_oob_clear(adreno_dev,
1910 OOB_BOOT_SLUMBER_CLEAR_MASK);
1911 return ret;
1912 }
1913 }
1914
Kyle Piefer68178ef2017-06-19 16:46:13 -07001915 if (gmu->idle_level < GPU_HW_SPTP_PC) {
1916 ret = a6xx_sptprac_enable(adreno_dev);
1917 if (ret)
1918 return ret;
1919 }
1920
Kyle Pieferb1027b02017-02-10 13:58:58 -08001921 ret = a6xx_gmu_hfi_start(device);
1922 if (ret)
1923 return ret;
1924
1925 /* Make sure the write to start HFI happens before sending a message */
1926 wmb();
1927 return ret;
1928}
1929
1930/*
1931 * a6xx_gmu_dcvs_nohfi() - request GMU to do DCVS without using HFI
1932 * @device: Pointer to KGSL device
1933 * @perf_idx: Index into GPU performance level table defined in
1934 * HFI DCVS table message
1935 * @bw_idx: Index into GPU b/w table defined in HFI b/w table message
1936 *
1937 */
1938static int a6xx_gmu_dcvs_nohfi(struct kgsl_device *device,
1939 unsigned int perf_idx, unsigned int bw_idx)
1940{
1941 struct hfi_dcvs_cmd dcvs_cmd = {
Kyle Piefere923b7a2017-03-28 17:31:48 -07001942 .ack_type = ACK_NONBLOCK,
Kyle Pieferb1027b02017-02-10 13:58:58 -08001943 .freq = {
1944 .perf_idx = perf_idx,
1945 .clkset_opt = OPTION_AT_LEAST,
1946 },
1947 .bw = {
1948 .bw_idx = bw_idx,
1949 },
1950 };
1951 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1952 struct gmu_device *gmu = &device->gmu;
1953 union gpu_perf_vote vote;
1954 int ret;
1955
Kyle Pieferb1027b02017-02-10 13:58:58 -08001956 kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_ACK_OPTION, dcvs_cmd.ack_type);
1957
1958 vote.fvote = dcvs_cmd.freq;
1959 kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_PERF_SETTING, vote.raw);
1960
1961 vote.bvote = dcvs_cmd.bw;
1962 kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_BW_SETTING, vote.raw);
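	/*
	 * union gpu_perf_vote overlays the freq and bw bitfield structs on a
	 * single word (vote.raw), so each vote above can be pushed to its GMU
	 * register with a single 32-bit write.
	 */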
1963
1964 ret = a6xx_oob_set(adreno_dev, OOB_DCVS_SET_MASK, OOB_DCVS_CHECK_MASK,
1965 OOB_DCVS_CLEAR_MASK);
1966
1967 if (ret) {
Kyle Piefer247e35c2017-06-08 11:13:11 -07001968 dev_err(&gmu->pdev->dev, "DCVS OOB timed out\n");
Kyle Pieferb1027b02017-02-10 13:58:58 -08001969 goto done;
1970 }
1971
1972 kgsl_gmu_regread(device, A6XX_GMU_DCVS_RETURN, &ret);
1973 if (ret)
1974 dev_err(&gmu->pdev->dev, "OOB DCVS error %d\n", ret);
1975
1976done:
1977 a6xx_oob_clear(adreno_dev, OOB_DCVS_CLEAR_MASK);
1978
1979 return ret;
1980}
1981
Oleg Perelet62d5cec2017-03-27 16:14:52 -07001982static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
1983{
1984 unsigned int reg;
1985
1986 kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
1987 A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg);
George Shencbb18e22017-05-11 16:04:13 -07001988 if (reg & GPUBUSYIGNAHB)
1989 return false;
1990 return true;
Oleg Perelet62d5cec2017-03-27 16:14:52 -07001991}
1992
Oleg Perelet39fead22018-01-08 14:46:17 -08001993static bool idle_transition_complete(unsigned int idle_level,
1994 unsigned int gmu_power_reg,
1995 unsigned int sptprac_clk_reg)
1996{
1997 if (idle_level != gmu_power_reg)
1998 return false;
1999
2000 switch (idle_level) {
2001 case GPU_HW_IFPC:
2002 if (is_on(sptprac_clk_reg))
2003 return false;
2004 break;
2005 /* other GMU idle levels can be added here */
2006 case GPU_HW_ACTIVE:
2007 default:
2008 break;
2009 }
2010 return true;
2011}
2012
Kyle Piefer4033f562017-08-16 10:00:48 -07002013static int a6xx_wait_for_lowest_idle(struct adreno_device *adreno_dev)
2014{
2015 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2016 struct gmu_device *gmu = &device->gmu;
Oleg Perelet39fead22018-01-08 14:46:17 -08002017 unsigned int reg, reg1;
Kyle Piefer4033f562017-08-16 10:00:48 -07002018 unsigned long t;
Oleg Perelet39fead22018-01-08 14:46:17 -08002019 uint64_t ts1, ts2, ts3;
Kyle Piefer4033f562017-08-16 10:00:48 -07002020
2021 if (!kgsl_gmu_isenabled(device))
2022 return 0;
2023
Oleg Perelet39fead22018-01-08 14:46:17 -08002024 ts1 = read_AO_counter(device);
2025
Kyle Piefer4033f562017-08-16 10:00:48 -07002026 t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
Oleg Perelet39fead22018-01-08 14:46:17 -08002027 do {
2028 kgsl_gmu_regread(device,
2029 A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
2030 kgsl_gmu_regread(device,
2031 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &reg1);
Kyle Piefer4033f562017-08-16 10:00:48 -07002032
Oleg Perelet39fead22018-01-08 14:46:17 -08002033		if (idle_transition_complete(gmu->idle_level, reg, reg1))
2034 return 0;
Kyle Piefer4033f562017-08-16 10:00:48 -07002035		/* Wait up to 100us to reduce unnecessary AHB bus traffic */
Oleg Perelet7f7f9f52017-10-31 10:02:45 -07002036 usleep_range(10, 100);
Oleg Perelet39fead22018-01-08 14:46:17 -08002037 } while (!time_after(jiffies, t));
Kyle Piefer4033f562017-08-16 10:00:48 -07002038
Oleg Perelet39fead22018-01-08 14:46:17 -08002039 ts2 = read_AO_counter(device);
Kyle Piefer4033f562017-08-16 10:00:48 -07002040 /* Check one last time */
Kyle Piefer4033f562017-08-16 10:00:48 -07002041
Oleg Perelet39fead22018-01-08 14:46:17 -08002042 kgsl_gmu_regread(device, A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
2043 kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &reg1);
2044
2045	if (idle_transition_complete(gmu->idle_level, reg, reg1))
2046 return 0;
2047
2048 ts3 = read_AO_counter(device);
2049 WARN(1, "Timeout waiting for lowest idle: %08x %llx %llx %llx %x\n",
2050 reg, ts1, ts2, ts3, reg1);
2051
Kyle Piefer4033f562017-08-16 10:00:48 -07002052 return -ETIMEDOUT;
2053}
2054
Oleg Perelet62d5cec2017-03-27 16:14:52 -07002055static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
Kyle Pieferb1027b02017-02-10 13:58:58 -08002056{
2057 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2058 struct gmu_device *gmu = &device->gmu;
Oleg Perelet5df700d2018-01-26 09:21:47 -08002059 unsigned int status2;
2060 uint64_t ts1;
Kyle Pieferb1027b02017-02-10 13:58:58 -08002061
Oleg Perelet5df700d2018-01-26 09:21:47 -08002062 ts1 = read_AO_counter(device);
Oleg Perelet62d5cec2017-03-27 16:14:52 -07002063 if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
Kyle Piefer5c9478c2017-04-20 15:12:05 -07002064 0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
Kyle Piefer247e35c2017-06-08 11:13:11 -07002065 kgsl_gmu_regread(device,
Kyle Piefer247e35c2017-06-08 11:13:11 -07002066 A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2, &status2);
2067 dev_err(&gmu->pdev->dev,
Oleg Perelet5df700d2018-01-26 09:21:47 -08002068 "GMU not idling: status2=0x%x %llx %llx\n",
2069 status2, ts1, read_AO_counter(device));
Oleg Perelet62d5cec2017-03-27 16:14:52 -07002070 return -ETIMEDOUT;
2071 }
Kyle Pieferb1027b02017-02-10 13:58:58 -08002072
Oleg Perelet62d5cec2017-03-27 16:14:52 -07002073 return 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08002074}
2075
2076/*
2077 * _load_gmu_firmware() - Load the ucode into the GPMU RAM & PDC/RSC
2078 * @device: Pointer to KGSL device
2079 */
2080static int _load_gmu_firmware(struct kgsl_device *device)
2081{
2082 const struct firmware *fw = NULL;
2083 const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2084 struct gmu_device *gmu = &device->gmu;
2085 const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
2086 int image_size, ret = -EINVAL;
2087
2088 /* there is no GMU */
2089 if (!kgsl_gmu_isenabled(device))
2090 return 0;
2091
2092 /* GMU fw already saved and verified so do nothing new */
2093 if (gmu->fw_image.hostptr != 0)
2094 return 0;
2095
2096 if (gpucore->gpmufw_name == NULL)
2097 return -EINVAL;
2098
2099 ret = request_firmware(&fw, gpucore->gpmufw_name, device->dev);
2100 if (ret || fw == NULL) {
2101 KGSL_CORE_ERR("request_firmware (%s) failed: %d\n",
2102 gpucore->gpmufw_name, ret);
2103 return ret;
2104 }
2105
2106 image_size = PAGE_ALIGN(fw->size);
2107
2108 ret = allocate_gmu_image(gmu, image_size);
2109
2110 /* load into shared memory with GMU */
2111 if (!ret)
2112 memcpy(gmu->fw_image.hostptr, fw->data, fw->size);
2113
2114 release_firmware(fw);
2115
2116 return ret;
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002117}
2118
2119/*
2120 * a6xx_microcode_read() - Read microcode
2121 * @adreno_dev: Pointer to adreno device
2122 */
2123static int a6xx_microcode_read(struct adreno_device *adreno_dev)
2124{
Lynus Vaz573e5012017-06-20 20:37:50 +05302125 int ret;
2126 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2127 struct adreno_firmware *sqe_fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
2128
2129 if (sqe_fw->memdesc.hostptr == NULL) {
2130 ret = _load_firmware(device, adreno_dev->gpucore->sqefw_name,
2131 sqe_fw);
2132 if (ret)
2133 return ret;
2134 }
2135
2136 return _load_gmu_firmware(device);
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002137}
2138
Rajesh Kemisettid1ca9542017-10-18 15:35:41 +05302139#define GBIF_CX_HALT_MASK BIT(1)
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002140
2141static int a6xx_soft_reset(struct adreno_device *adreno_dev)
2142{
2143 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2144 unsigned int reg;
Shrenuj Bansal13cae372017-06-07 13:34:35 -07002145 unsigned long time;
2146 bool vbif_acked = false;
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002147
2148 /*
2149 * For the soft reset case with GMU enabled this part is done
2150 * by the GMU firmware
2151 */
Shrenuj Bansald0fe7462017-05-08 16:11:19 -07002152 if (kgsl_gmu_isenabled(device) &&
2153 !test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv))
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002154 return 0;
2155
2156
2157 adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 1);
2158 /*
2159 * Do a dummy read to get a brief read cycle delay for the
2160 * reset to take effect
2161 */
2162 adreno_readreg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, &reg);
2163 adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 0);
2164
Shrenuj Bansal13cae372017-06-07 13:34:35 -07002165 /* Wait for the VBIF reset ack to complete */
2166 time = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
2167
2168 do {
2169 kgsl_regread(device, A6XX_RBBM_VBIF_GX_RESET_STATUS, &reg);
2170 if ((reg & VBIF_RESET_ACK_MASK) == VBIF_RESET_ACK_MASK) {
2171 vbif_acked = true;
2172 break;
2173 }
2174 cpu_relax();
2175 } while (!time_after(jiffies, time));
2176
2177 if (!vbif_acked)
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002178 return -ETIMEDOUT;
2179
Rajesh Kemisettid1ca9542017-10-18 15:35:41 +05302180 /*
2181 * GBIF GX halt will be released automatically by sw_reset.
2182 * Release GBIF CX halt after sw_reset
2183 */
2184 if (adreno_has_gbif(adreno_dev))
2185 kgsl_regrmw(device, A6XX_GBIF_HALT, GBIF_CX_HALT_MASK, 0);
2186
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002187 a6xx_sptprac_enable(adreno_dev);
2188
2189 return 0;
2190}
2191
Kyle Piefere923b7a2017-03-28 17:31:48 -07002192#define A6XX_STATE_OF_CHILD (BIT(4) | BIT(5))
2193#define A6XX_IDLE_FULL_LLM BIT(0)
2194#define A6XX_WAKEUP_ACK BIT(1)
2195#define A6XX_IDLE_FULL_ACK BIT(0)
2196#define A6XX_VBIF_XIN_HALT_CTRL1_ACKS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
2197
2198static void a6xx_isense_disable(struct kgsl_device *device)
2199{
2200 unsigned int val;
2201 const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2202
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07002203 if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
2204 !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
Kyle Piefere923b7a2017-03-28 17:31:48 -07002205 return;
2206
2207 kgsl_gmu_regread(device, A6XX_GPU_CS_ENABLE_REG, &val);
2208 if (val) {
2209 kgsl_gmu_regwrite(device, A6XX_GPU_CS_ENABLE_REG, 0);
2210 kgsl_gmu_regwrite(device, A6XX_GMU_ISENSE_CTRL, 0);
2211 }
2212}
2213
2214static int a6xx_llm_glm_handshake(struct kgsl_device *device)
2215{
2216 unsigned int val;
2217 const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2218 struct gmu_device *gmu = &device->gmu;
2219
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07002220 if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
2221 !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
Kyle Piefere923b7a2017-03-28 17:31:48 -07002222 return 0;
2223
2224 kgsl_gmu_regread(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, &val);
2225 if (!(val & A6XX_STATE_OF_CHILD)) {
2226 kgsl_gmu_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0, BIT(4));
2227 kgsl_gmu_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0,
2228 A6XX_IDLE_FULL_LLM);
2229 if (timed_poll_check(device, A6XX_GMU_LLM_GLM_SLEEP_STATUS,
2230 A6XX_IDLE_FULL_ACK, GPU_RESET_TIMEOUT,
2231 A6XX_IDLE_FULL_ACK)) {
2232 dev_err(&gmu->pdev->dev, "LLM-GLM handshake failed\n");
2233 return -EINVAL;
2234 }
2235 }
2236
2237 return 0;
2238}
2239
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07002240
2241static void a6xx_count_throttles(struct adreno_device *adreno_dev,
2242 uint64_t adj)
2243{
2244 if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
2245 !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
2246 return;
2247
2248 kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
2249 adreno_dev->lm_threshold_count,
2250 &adreno_dev->lm_threshold_cross);
2251}
2252
Kyle Piefere923b7a2017-03-28 17:31:48 -07002253static int a6xx_complete_rpmh_votes(struct kgsl_device *device)
2254{
2255 int ret = 0;
2256
2257 if (!kgsl_gmu_isenabled(device))
2258 return ret;
2259
2260 ret |= timed_poll_check(device, A6XX_RSCC_TCS0_DRV0_STATUS, BIT(0),
2261 GPU_RESET_TIMEOUT, BIT(0));
2262 ret |= timed_poll_check(device, A6XX_RSCC_TCS1_DRV0_STATUS, BIT(0),
2263 GPU_RESET_TIMEOUT, BIT(0));
2264 ret |= timed_poll_check(device, A6XX_RSCC_TCS2_DRV0_STATUS, BIT(0),
2265 GPU_RESET_TIMEOUT, BIT(0));
2266 ret |= timed_poll_check(device, A6XX_RSCC_TCS3_DRV0_STATUS, BIT(0),
2267 GPU_RESET_TIMEOUT, BIT(0));
2268
2269 return ret;
2270}
2271
2272static int a6xx_gmu_suspend(struct kgsl_device *device)
2273{
2274 /* Max GX clients on A6xx is 2: GMU and KMD */
2275 int ret = 0, max_client_num = 2;
2276 struct gmu_device *gmu = &device->gmu;
2277 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2278
2279	/* The next two steps only apply if the LM feature is enabled */
2280 /* Disable ISENSE if it's on */
2281 a6xx_isense_disable(device);
2282
2283 /* LLM-GLM handshake sequence */
2284 a6xx_llm_glm_handshake(device);
2285
2286 /* If SPTP_RAC is on, turn off SPTP_RAC HS */
2287 a6xx_sptprac_disable(adreno_dev);
2288
George Shenf135a972017-08-24 16:59:42 -07002289	/* Disconnecting the GPU from the bus is not needed if the CX GDSC goes off later */
Kyle Piefere923b7a2017-03-28 17:31:48 -07002290
2291 /* Check no outstanding RPMh voting */
2292 a6xx_complete_rpmh_votes(device);
2293
Kyle Piefer68178ef2017-06-19 16:46:13 -07002294 if (gmu->gx_gdsc) {
Kyle Piefere923b7a2017-03-28 17:31:48 -07002295 if (regulator_is_enabled(gmu->gx_gdsc)) {
2296			/* Switch GX GDSC control from GMU to CPU;
2297			 * force a non-zero reference count in the clk driver
2298			 * so the next disable call will turn
2299			 * off the GDSC
2300 */
2301 ret = regulator_enable(gmu->gx_gdsc);
2302 if (ret)
2303 dev_err(&gmu->pdev->dev,
2304 "suspend fail: gx enable\n");
2305
2306 while ((max_client_num)) {
2307 ret = regulator_disable(gmu->gx_gdsc);
2308 if (!regulator_is_enabled(gmu->gx_gdsc))
2309 break;
2310 max_client_num -= 1;
2311 }
2312
2313 if (!max_client_num)
2314 dev_err(&gmu->pdev->dev,
2315 "suspend fail: cannot disable gx\n");
2316 }
2317 }
2318
2319 return ret;
2320}
2321
2322/*
2323 * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
2324 * @adreno_dev: Pointer to adreno device
2325 * @mode: requested power mode
2326 * @arg1: first argument for mode control
2327 * @arg2: second argument for mode control
2328 */
2329static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
2330 unsigned int mode, unsigned int arg1, unsigned int arg2)
2331{
2332 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2333 struct gmu_device *gmu = &device->gmu;
2334 int ret;
2335
2336 switch (mode) {
2337 case GMU_FW_START:
2338 ret = a6xx_gmu_fw_start(device, arg1);
2339 break;
2340 case GMU_SUSPEND:
2341 ret = a6xx_gmu_suspend(device);
2342 break;
2343 case GMU_FW_STOP:
George Shena458dd92018-01-03 14:20:34 -08002344 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
2345 a6xx_oob_clear(adreno_dev,
2346 OOB_BOOT_SLUMBER_CLEAR_MASK);
Kyle Piefere923b7a2017-03-28 17:31:48 -07002347 ret = a6xx_rpmh_power_off_gpu(device);
2348 break;
2349 case GMU_DCVS_NOHFI:
2350 ret = a6xx_gmu_dcvs_nohfi(device, arg1, arg2);
2351 break;
2352 case GMU_NOTIFY_SLUMBER:
2353 ret = a6xx_notify_slumber(device);
2354 break;
2355 default:
2356 dev_err(&gmu->pdev->dev,
2357 "unsupported GMU power ctrl mode:%d\n", mode);
2358 ret = -EINVAL;
2359 break;
2360 }
2361
2362 return ret;
2363}
2364
Shrenuj Bansald0fe7462017-05-08 16:11:19 -07002365/**
2366 * a6xx_reset() - Helper function to reset the GPU
2367 * @device: Pointer to the KGSL device structure for the GPU
2368 * @fault: Type of fault. Needed to skip soft reset for MMU fault
2369 *
2370 * Try to reset the GPU to recover from a fault. First, try to do a low latency
2371 * soft reset. If the soft reset fails for some reason, then bring out the big
2372 * guns and toggle the footswitch.
2373 */
2374static int a6xx_reset(struct kgsl_device *device, int fault)
2375{
2376 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2377 int ret = -EINVAL;
2378 int i = 0;
2379
2380 /* Use the regular reset sequence for No GMU */
2381 if (!kgsl_gmu_isenabled(device))
2382 return adreno_reset(device, fault);
2383
2384 /* Transition from ACTIVE to RESET state */
2385 kgsl_pwrctrl_change_state(device, KGSL_STATE_RESET);
2386
2387 /* Try soft reset first */
2388 if (!(fault & ADRENO_IOMMU_PAGE_FAULT)) {
2389 int acked;
2390
2391 /* NMI */
2392 kgsl_gmu_regwrite(device, A6XX_GMU_NMI_CONTROL_STATUS, 0);
2393 kgsl_gmu_regwrite(device, A6XX_GMU_CM3_CFG, (1 << 9));
2394
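		/*
		 * The CM3_CFG write above (bit 9) raises an NMI to the GMU
		 * CM3 core. Poll for the firmware ack for up to ~1 ms
		 * (10 iterations x 100 us) before falling back to the hard
		 * reset path below.
		 */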
2395 for (i = 0; i < 10; i++) {
2396 kgsl_gmu_regread(device,
2397 A6XX_GMU_NMI_CONTROL_STATUS, &acked);
2398
2399			/* NMI FW ACK received */
2400 if (acked == 0x1)
2401 break;
2402
2403 udelay(100);
2404 }
2405
Rajesh Kemisettid1ca9542017-10-18 15:35:41 +05302406 if (acked) {
2407 /* Make sure VBIF/GBIF is cleared before resetting */
2408 ret = adreno_vbif_clear_pending_transactions(device);
2409
2410 if (ret == 0)
2411 ret = adreno_soft_reset(device);
2412 }
2413
Shrenuj Bansald0fe7462017-05-08 16:11:19 -07002414 if (ret)
2415 KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
2416 }
2417 if (ret) {
2418 /* If soft reset failed/skipped, then pull the power */
2419 set_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv);
2420 /* since device is officially off now clear start bit */
2421 clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
2422
2423 /* Keep trying to start the device until it works */
2424 for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) {
2425 ret = adreno_start(device, 0);
2426 if (!ret)
2427 break;
2428
2429 msleep(20);
2430 }
2431 }
2432
2433 clear_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv);
2434
2435 if (ret)
2436 return ret;
2437
2438 if (i != 0)
2439 KGSL_DRV_WARN(device, "Device hard reset tried %d tries\n", i);
2440
2441 /*
2442 * If active_cnt is non-zero then the system was active before
2443 * going into a reset - put it back in that state
2444 */
2445
2446 if (atomic_read(&device->active_cnt))
2447 kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
2448 else
2449 kgsl_pwrctrl_change_state(device, KGSL_STATE_NAP);
2450
2451 return ret;
2452}
2453
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002454static void a6xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit)
2455{
2456 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2457 unsigned int status1, status2;
2458
2459 kgsl_regread(device, A6XX_CP_INTERRUPT_STATUS, &status1);
2460
Shrenuj Bansala602c022017-03-08 10:40:34 -08002461 if (status1 & BIT(A6XX_CP_OPCODE_ERROR)) {
2462 unsigned int opcode;
2463
2464 kgsl_regwrite(device, A6XX_CP_SQE_STAT_ADDR, 1);
2465 kgsl_regread(device, A6XX_CP_SQE_STAT_DATA, &opcode);
2466 KGSL_DRV_CRIT_RATELIMIT(device,
Kyle Piefer2ce06162017-03-15 11:29:08 -07002467 "CP opcode error interrupt | opcode=0x%8.8x\n",
2468 opcode);
Shrenuj Bansala602c022017-03-08 10:40:34 -08002469 }
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002470 if (status1 & BIT(A6XX_CP_UCODE_ERROR))
2471 KGSL_DRV_CRIT_RATELIMIT(device, "CP ucode error interrupt\n");
2472 if (status1 & BIT(A6XX_CP_HW_FAULT_ERROR)) {
2473 kgsl_regread(device, A6XX_CP_HW_FAULT, &status2);
2474 KGSL_DRV_CRIT_RATELIMIT(device,
2475 "CP | Ringbuffer HW fault | status=%x\n",
2476 status2);
2477 }
2478 if (status1 & BIT(A6XX_CP_REGISTER_PROTECTION_ERROR)) {
2479 kgsl_regread(device, A6XX_CP_PROTECT_STATUS, &status2);
2480 KGSL_DRV_CRIT_RATELIMIT(device,
2481 "CP | Protected mode error | %s | addr=%x | status=%x\n",
2482 status2 & (1 << 20) ? "READ" : "WRITE",
Lynus Vazdc807342017-02-20 18:23:25 +05302483 status2 & 0x3FFFF, status2);
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002484 }
2485 if (status1 & BIT(A6XX_CP_AHB_ERROR))
2486 KGSL_DRV_CRIT_RATELIMIT(device,
2487 "CP AHB error interrupt\n");
2488 if (status1 & BIT(A6XX_CP_VSD_PARITY_ERROR))
2489 KGSL_DRV_CRIT_RATELIMIT(device,
2490 "CP VSD decoder parity error\n");
2491 if (status1 & BIT(A6XX_CP_ILLEGAL_INSTR_ERROR))
2492 KGSL_DRV_CRIT_RATELIMIT(device,
2493 "CP Illegal instruction error\n");
2494
2495}
2496
2497static void a6xx_err_callback(struct adreno_device *adreno_dev, int bit)
2498{
2499 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2500
2501 switch (bit) {
2502 case A6XX_INT_CP_AHB_ERROR:
2503 KGSL_DRV_CRIT_RATELIMIT(device, "CP: AHB bus error\n");
2504 break;
2505 case A6XX_INT_ATB_ASYNCFIFO_OVERFLOW:
2506 KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB ASYNC overflow\n");
2507 break;
2508 case A6XX_INT_RBBM_ATB_BUS_OVERFLOW:
2509 KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: ATB bus overflow\n");
2510 break;
2511 case A6XX_INT_UCHE_OOB_ACCESS:
2512 KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Out of bounds access\n");
2513 break;
2514 case A6XX_INT_UCHE_TRAP_INTR:
2515 KGSL_DRV_CRIT_RATELIMIT(device, "UCHE: Trap interrupt\n");
2516 break;
2517 default:
2518 KGSL_DRV_CRIT_RATELIMIT(device, "Unknown interrupt %d\n", bit);
2519 }
2520}
2521
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06002522/*
2523 * a6xx_llc_configure_gpu_scid() - Program the sub-cache ID for all GPU blocks
2524 * @adreno_dev: The adreno device pointer
2525 */
2526static void a6xx_llc_configure_gpu_scid(struct adreno_device *adreno_dev)
2527{
2528 uint32_t gpu_scid;
2529 uint32_t gpu_cntl1_val = 0;
2530 int i;
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06002531
2532 gpu_scid = adreno_llc_get_scid(adreno_dev->gpu_llc_slice);
2533 for (i = 0; i < A6XX_LLC_NUM_GPU_SCIDS; i++)
2534 gpu_cntl1_val = (gpu_cntl1_val << A6XX_GPU_LLC_SCID_NUM_BITS)
2535 | gpu_scid;
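	/*
	 * The loop above replicates the same SCID into every GPU SCID field of
	 * the control value. Purely illustrative: with three 5-bit fields and
	 * a SCID of 2 this would yield 0b00010_00010_00010 = 0x842; the real
	 * field count and width come from A6XX_LLC_NUM_GPU_SCIDS and
	 * A6XX_GPU_LLC_SCID_NUM_BITS.
	 */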
2536
Lynus Vaz0925b4a2018-10-03 12:55:21 +05302537 adreno_cx_misc_regrmw(adreno_dev,
2538 A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06002539 A6XX_GPU_LLC_SCID_MASK, gpu_cntl1_val);
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06002540}
2541
2542/*
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07002543 * a6xx_llc_configure_gpuhtw_scid() - Program the SCID for GPU pagetables
2544 * @adreno_dev: The adreno device pointer
2545 */
2546static void a6xx_llc_configure_gpuhtw_scid(struct adreno_device *adreno_dev)
2547{
2548 uint32_t gpuhtw_scid;
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07002549
2550 gpuhtw_scid = adreno_llc_get_scid(adreno_dev->gpuhtw_llc_slice);
2551
Lynus Vaz0925b4a2018-10-03 12:55:21 +05302552 adreno_cx_misc_regrmw(adreno_dev,
2553 A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_1,
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07002554 A6XX_GPUHTW_LLC_SCID_MASK,
2555 gpuhtw_scid << A6XX_GPUHTW_LLC_SCID_SHIFT);
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07002556}
2557
2558/*
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06002559 * a6xx_llc_enable_overrides() - Override the page attributes
2560 * @adreno_dev: The adreno device pointer
2561 */
2562static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev)
2563{
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06002564 /*
2565 * 0x3: readnoallocoverrideen=0
2566 * read-no-alloc=0 - Allocate lines on read miss
2567 * writenoallocoverrideen=1
2568	 * write-no-alloc=1 - Do not allocate lines on write miss
2569 */
Lynus Vaz0925b4a2018-10-03 12:55:21 +05302570 adreno_cx_misc_regwrite(adreno_dev,
2571 A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0, 0x3);
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06002572}
2573
Lynus Vaz1fde74d2017-03-20 18:02:47 +05302574static const char *fault_block[8] = {
2575 [0] = "CP",
2576 [1] = "UCHE",
2577 [2] = "VFD",
2578 [3] = "UCHE",
2579 [4] = "CCU",
2580 [5] = "unknown",
2581 [6] = "CDP Prefetch",
2582 [7] = "GPMU",
2583};
2584
2585static const char *uche_client[8] = {
2586 [0] = "VFD",
2587 [1] = "SP",
2588 [2] = "VSC",
2589 [3] = "VPC",
2590 [4] = "HLSQ",
2591 [5] = "PC",
2592 [6] = "LRZ",
2593 [7] = "unknown",
2594};
2595
2596static const char *a6xx_iommu_fault_block(struct adreno_device *adreno_dev,
2597 unsigned int fsynr1)
2598{
2599 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2600 unsigned int client_id;
2601 unsigned int uche_client_id;
2602
2603 client_id = fsynr1 & 0xff;
2604
2605 if (client_id >= ARRAY_SIZE(fault_block))
2606 return "unknown";
2607 else if (client_id != 3)
2608 return fault_block[client_id];
2609
Harshdeep Dhatt3f074a92017-05-01 12:59:01 -06002610 mutex_lock(&device->mutex);
Lynus Vaz1fde74d2017-03-20 18:02:47 +05302611 kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
Harshdeep Dhatt3f074a92017-05-01 12:59:01 -06002612 mutex_unlock(&device->mutex);
2613
Lynus Vaz1fde74d2017-03-20 18:02:47 +05302614 return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
2615}
2616
Harshdeep Dhattd388e522017-07-06 14:30:06 -06002617static void a6xx_cp_callback(struct adreno_device *adreno_dev, int bit)
2618{
2619 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2620
Harshdeep Dhatt7ee8a862017-11-20 17:51:54 -07002621 if (adreno_is_preemption_enabled(adreno_dev))
Harshdeep Dhatt12a642c2017-08-17 12:19:26 -06002622 a6xx_preemption_trigger(adreno_dev);
2623
Harshdeep Dhattd388e522017-07-06 14:30:06 -06002624 adreno_dispatcher_schedule(device);
2625}
2626
Carter Cooperc8d48642017-08-18 10:39:57 -06002627/*
2628 * a6xx_gpc_err_int_callback() - Isr for GPC error interrupts
2629 * @adreno_dev: Pointer to device
2630 * @bit: Interrupt bit
2631 */
2632static void a6xx_gpc_err_int_callback(struct adreno_device *adreno_dev, int bit)
2633{
2634 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
2635
2636 /*
2637	 * A GPC error is typically the result of a SW programming mistake.
2638	 * Force a GPU fault for this interrupt so that we can debug it
2639	 * with the help of a register dump.
2640 */
2641
2642 KGSL_DRV_CRIT_RATELIMIT(device, "RBBM: GPC error\n");
2643 adreno_irqctrl(adreno_dev, 0);
2644
2645 /* Trigger a fault in the dispatcher - this will effect a restart */
2646 adreno_set_gpu_fault(adreno_dev, ADRENO_SOFT_FAULT);
2647 adreno_dispatcher_schedule(device);
2648}
2649
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002650#define A6XX_INT_MASK \
Kyle Pieferb1027b02017-02-10 13:58:58 -08002651 ((1 << A6XX_INT_CP_AHB_ERROR) | \
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002652 (1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) | \
Kyle Pieferb1027b02017-02-10 13:58:58 -08002653 (1 << A6XX_INT_RBBM_GPC_ERROR) | \
2654 (1 << A6XX_INT_CP_SW) | \
2655 (1 << A6XX_INT_CP_HW_ERROR) | \
2656 (1 << A6XX_INT_CP_IB2) | \
2657 (1 << A6XX_INT_CP_IB1) | \
2658 (1 << A6XX_INT_CP_RB) | \
2659 (1 << A6XX_INT_CP_CACHE_FLUSH_TS) | \
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002660 (1 << A6XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
Kyle Pieferb1027b02017-02-10 13:58:58 -08002661 (1 << A6XX_INT_RBBM_HANG_DETECT) | \
2662 (1 << A6XX_INT_UCHE_OOB_ACCESS) | \
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002663 (1 << A6XX_INT_UCHE_TRAP_INTR))
2664
2665static struct adreno_irq_funcs a6xx_irq_funcs[32] = {
2666 ADRENO_IRQ_CALLBACK(NULL), /* 0 - RBBM_GPU_IDLE */
2667 ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 1 - RBBM_AHB_ERROR */
2668 ADRENO_IRQ_CALLBACK(NULL), /* 2 - UNUSED */
2669 ADRENO_IRQ_CALLBACK(NULL), /* 3 - UNUSED */
2670 ADRENO_IRQ_CALLBACK(NULL), /* 4 - UNUSED */
2671 ADRENO_IRQ_CALLBACK(NULL), /* 5 - UNUSED */
2672 /* 6 - RBBM_ATB_ASYNC_OVERFLOW */
2673 ADRENO_IRQ_CALLBACK(a6xx_err_callback),
Carter Cooperc8d48642017-08-18 10:39:57 -06002674 ADRENO_IRQ_CALLBACK(a6xx_gpc_err_int_callback), /* 7 - GPC_ERR */
Harshdeep Dhatt0cdc8992017-05-31 15:44:05 -06002675 ADRENO_IRQ_CALLBACK(a6xx_preemption_callback),/* 8 - CP_SW */
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002676 ADRENO_IRQ_CALLBACK(a6xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */
2677 ADRENO_IRQ_CALLBACK(NULL), /* 10 - CP_CCU_FLUSH_DEPTH_TS */
2678 ADRENO_IRQ_CALLBACK(NULL), /* 11 - CP_CCU_FLUSH_COLOR_TS */
2679 ADRENO_IRQ_CALLBACK(NULL), /* 12 - CP_CCU_RESOLVE_TS */
2680 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 13 - CP_IB2_INT */
2681 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 14 - CP_IB1_INT */
2682 ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 15 - CP_RB_INT */
2683 ADRENO_IRQ_CALLBACK(NULL), /* 16 - UNUSED */
2684 ADRENO_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
2685 ADRENO_IRQ_CALLBACK(NULL), /* 18 - CP_WT_DONE_TS */
2686 ADRENO_IRQ_CALLBACK(NULL), /* 19 - UNUSED */
Harshdeep Dhattd388e522017-07-06 14:30:06 -06002687 ADRENO_IRQ_CALLBACK(a6xx_cp_callback), /* 20 - CP_CACHE_FLUSH_TS */
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002688 ADRENO_IRQ_CALLBACK(NULL), /* 21 - UNUSED */
2689 ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 22 - RBBM_ATB_BUS_OVERFLOW */
2690 /* 23 - MISC_HANG_DETECT */
2691 ADRENO_IRQ_CALLBACK(adreno_hang_int_callback),
2692 ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 24 - UCHE_OOB_ACCESS */
2693 ADRENO_IRQ_CALLBACK(a6xx_err_callback), /* 25 - UCHE_TRAP_INTR */
2694 ADRENO_IRQ_CALLBACK(NULL), /* 26 - DEBBUS_INTR_0 */
2695 ADRENO_IRQ_CALLBACK(NULL), /* 27 - DEBBUS_INTR_1 */
2696 ADRENO_IRQ_CALLBACK(NULL), /* 28 - UNUSED */
2697 ADRENO_IRQ_CALLBACK(NULL), /* 29 - UNUSED */
2698 ADRENO_IRQ_CALLBACK(NULL), /* 30 - ISDB_CPU_IRQ */
2699 ADRENO_IRQ_CALLBACK(NULL), /* 31 - ISDB_UNDER_DEBUG */
2700};
2701
2702static struct adreno_irq a6xx_irq = {
2703 .funcs = a6xx_irq_funcs,
2704 .mask = A6XX_INT_MASK,
2705};
2706
Shrenuj Bansal41665402016-12-16 15:25:54 -08002707static struct adreno_snapshot_sizes a6xx_snap_sizes = {
2708 .cp_pfp = 0x33,
2709 .roq = 0x400,
2710};
2711
2712static struct adreno_snapshot_data a6xx_snapshot_data = {
2713 .sect_sizes = &a6xx_snap_sizes,
2714};
2715
Lokesh Batraa8300e02017-05-25 11:17:40 -07002716static struct adreno_coresight_register a6xx_coresight_regs[] = {
2717 { A6XX_DBGC_CFG_DBGBUS_SEL_A },
2718 { A6XX_DBGC_CFG_DBGBUS_SEL_B },
2719 { A6XX_DBGC_CFG_DBGBUS_SEL_C },
2720 { A6XX_DBGC_CFG_DBGBUS_SEL_D },
2721 { A6XX_DBGC_CFG_DBGBUS_CNTLT },
2722 { A6XX_DBGC_CFG_DBGBUS_CNTLM },
2723 { A6XX_DBGC_CFG_DBGBUS_OPL },
2724 { A6XX_DBGC_CFG_DBGBUS_OPE },
2725 { A6XX_DBGC_CFG_DBGBUS_IVTL_0 },
2726 { A6XX_DBGC_CFG_DBGBUS_IVTL_1 },
2727 { A6XX_DBGC_CFG_DBGBUS_IVTL_2 },
2728 { A6XX_DBGC_CFG_DBGBUS_IVTL_3 },
2729 { A6XX_DBGC_CFG_DBGBUS_MASKL_0 },
2730 { A6XX_DBGC_CFG_DBGBUS_MASKL_1 },
2731 { A6XX_DBGC_CFG_DBGBUS_MASKL_2 },
2732 { A6XX_DBGC_CFG_DBGBUS_MASKL_3 },
2733 { A6XX_DBGC_CFG_DBGBUS_BYTEL_0 },
2734 { A6XX_DBGC_CFG_DBGBUS_BYTEL_1 },
2735 { A6XX_DBGC_CFG_DBGBUS_IVTE_0 },
2736 { A6XX_DBGC_CFG_DBGBUS_IVTE_1 },
2737 { A6XX_DBGC_CFG_DBGBUS_IVTE_2 },
2738 { A6XX_DBGC_CFG_DBGBUS_IVTE_3 },
2739 { A6XX_DBGC_CFG_DBGBUS_MASKE_0 },
2740 { A6XX_DBGC_CFG_DBGBUS_MASKE_1 },
2741 { A6XX_DBGC_CFG_DBGBUS_MASKE_2 },
2742 { A6XX_DBGC_CFG_DBGBUS_MASKE_3 },
2743 { A6XX_DBGC_CFG_DBGBUS_NIBBLEE },
2744 { A6XX_DBGC_CFG_DBGBUS_PTRC0 },
2745 { A6XX_DBGC_CFG_DBGBUS_PTRC1 },
2746 { A6XX_DBGC_CFG_DBGBUS_LOADREG },
2747 { A6XX_DBGC_CFG_DBGBUS_IDX },
2748 { A6XX_DBGC_CFG_DBGBUS_CLRC },
2749 { A6XX_DBGC_CFG_DBGBUS_LOADIVT },
2750 { A6XX_DBGC_VBIF_DBG_CNTL },
2751 { A6XX_DBGC_DBG_LO_HI_GPIO },
2752 { A6XX_DBGC_EXT_TRACE_BUS_CNTL },
2753 { A6XX_DBGC_READ_AHB_THROUGH_DBG },
2754 { A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 },
2755 { A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 },
2756 { A6XX_DBGC_EVT_CFG },
2757 { A6XX_DBGC_EVT_INTF_SEL_0 },
2758 { A6XX_DBGC_EVT_INTF_SEL_1 },
2759 { A6XX_DBGC_PERF_ATB_CFG },
2760 { A6XX_DBGC_PERF_ATB_COUNTER_SEL_0 },
2761 { A6XX_DBGC_PERF_ATB_COUNTER_SEL_1 },
2762 { A6XX_DBGC_PERF_ATB_COUNTER_SEL_2 },
2763 { A6XX_DBGC_PERF_ATB_COUNTER_SEL_3 },
2764 { A6XX_DBGC_PERF_ATB_TRIG_INTF_SEL_0 },
2765 { A6XX_DBGC_PERF_ATB_TRIG_INTF_SEL_1 },
2766 { A6XX_DBGC_PERF_ATB_DRAIN_CMD },
2767 { A6XX_DBGC_ECO_CNTL },
2768 { A6XX_DBGC_AHB_DBG_CNTL },
2769};
2770
2771static struct adreno_coresight_register a6xx_coresight_regs_cx[] = {
2772 { A6XX_CX_DBGC_CFG_DBGBUS_SEL_A },
2773 { A6XX_CX_DBGC_CFG_DBGBUS_SEL_B },
2774 { A6XX_CX_DBGC_CFG_DBGBUS_SEL_C },
2775 { A6XX_CX_DBGC_CFG_DBGBUS_SEL_D },
2776 { A6XX_CX_DBGC_CFG_DBGBUS_CNTLT },
2777 { A6XX_CX_DBGC_CFG_DBGBUS_CNTLM },
2778 { A6XX_CX_DBGC_CFG_DBGBUS_OPL },
2779 { A6XX_CX_DBGC_CFG_DBGBUS_OPE },
2780 { A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 },
2781 { A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 },
2782 { A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 },
2783 { A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 },
2784 { A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 },
2785 { A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 },
2786 { A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 },
2787 { A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 },
2788 { A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 },
2789 { A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 },
2790 { A6XX_CX_DBGC_CFG_DBGBUS_IVTE_0 },
2791 { A6XX_CX_DBGC_CFG_DBGBUS_IVTE_1 },
2792 { A6XX_CX_DBGC_CFG_DBGBUS_IVTE_2 },
2793 { A6XX_CX_DBGC_CFG_DBGBUS_IVTE_3 },
2794 { A6XX_CX_DBGC_CFG_DBGBUS_MASKE_0 },
2795 { A6XX_CX_DBGC_CFG_DBGBUS_MASKE_1 },
2796 { A6XX_CX_DBGC_CFG_DBGBUS_MASKE_2 },
2797 { A6XX_CX_DBGC_CFG_DBGBUS_MASKE_3 },
2798 { A6XX_CX_DBGC_CFG_DBGBUS_NIBBLEE },
2799 { A6XX_CX_DBGC_CFG_DBGBUS_PTRC0 },
2800 { A6XX_CX_DBGC_CFG_DBGBUS_PTRC1 },
2801 { A6XX_CX_DBGC_CFG_DBGBUS_LOADREG },
2802 { A6XX_CX_DBGC_CFG_DBGBUS_IDX },
2803 { A6XX_CX_DBGC_CFG_DBGBUS_CLRC },
2804 { A6XX_CX_DBGC_CFG_DBGBUS_LOADIVT },
2805 { A6XX_CX_DBGC_VBIF_DBG_CNTL },
2806 { A6XX_CX_DBGC_DBG_LO_HI_GPIO },
2807 { A6XX_CX_DBGC_EXT_TRACE_BUS_CNTL },
2808 { A6XX_CX_DBGC_READ_AHB_THROUGH_DBG },
2809 { A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 },
2810 { A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 },
2811 { A6XX_CX_DBGC_EVT_CFG },
2812 { A6XX_CX_DBGC_EVT_INTF_SEL_0 },
2813 { A6XX_CX_DBGC_EVT_INTF_SEL_1 },
2814 { A6XX_CX_DBGC_PERF_ATB_CFG },
2815 { A6XX_CX_DBGC_PERF_ATB_COUNTER_SEL_0 },
2816 { A6XX_CX_DBGC_PERF_ATB_COUNTER_SEL_1 },
2817 { A6XX_CX_DBGC_PERF_ATB_COUNTER_SEL_2 },
2818 { A6XX_CX_DBGC_PERF_ATB_COUNTER_SEL_3 },
2819 { A6XX_CX_DBGC_PERF_ATB_TRIG_INTF_SEL_0 },
2820 { A6XX_CX_DBGC_PERF_ATB_TRIG_INTF_SEL_1 },
2821 { A6XX_CX_DBGC_PERF_ATB_DRAIN_CMD },
2822 { A6XX_CX_DBGC_ECO_CNTL },
2823 { A6XX_CX_DBGC_AHB_DBG_CNTL },
2824};
2825
2826static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_a, &a6xx_coresight_regs[0]);
2827static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_b, &a6xx_coresight_regs[1]);
2828static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_c, &a6xx_coresight_regs[2]);
2829static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_sel_d, &a6xx_coresight_regs[3]);
2830static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_cntlt, &a6xx_coresight_regs[4]);
2831static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_cntlm, &a6xx_coresight_regs[5]);
2832static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_opl, &a6xx_coresight_regs[6]);
2833static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ope, &a6xx_coresight_regs[7]);
2834static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_0, &a6xx_coresight_regs[8]);
2835static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_1, &a6xx_coresight_regs[9]);
2836static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_2, &a6xx_coresight_regs[10]);
2837static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivtl_3, &a6xx_coresight_regs[11]);
2838static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_0, &a6xx_coresight_regs[12]);
2839static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_1, &a6xx_coresight_regs[13]);
2840static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_2, &a6xx_coresight_regs[14]);
2841static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maskl_3, &a6xx_coresight_regs[15]);
2842static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_bytel_0, &a6xx_coresight_regs[16]);
2843static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_bytel_1, &a6xx_coresight_regs[17]);
2844static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_0, &a6xx_coresight_regs[18]);
2845static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_1, &a6xx_coresight_regs[19]);
2846static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_2, &a6xx_coresight_regs[20]);
2847static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ivte_3, &a6xx_coresight_regs[21]);
2848static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_0, &a6xx_coresight_regs[22]);
2849static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_1, &a6xx_coresight_regs[23]);
2850static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_2, &a6xx_coresight_regs[24]);
2851static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_maske_3, &a6xx_coresight_regs[25]);
2852static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_nibblee, &a6xx_coresight_regs[26]);
2853static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ptrc0, &a6xx_coresight_regs[27]);
2854static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_ptrc1, &a6xx_coresight_regs[28]);
2855static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_loadreg, &a6xx_coresight_regs[29]);
2856static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_idx, &a6xx_coresight_regs[30]);
2857static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_clrc, &a6xx_coresight_regs[31]);
2858static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_loadivt, &a6xx_coresight_regs[32]);
2859static ADRENO_CORESIGHT_ATTR(vbif_dbg_cntl, &a6xx_coresight_regs[33]);
2860static ADRENO_CORESIGHT_ATTR(dbg_lo_hi_gpio, &a6xx_coresight_regs[34]);
2861static ADRENO_CORESIGHT_ATTR(ext_trace_bus_cntl, &a6xx_coresight_regs[35]);
2862static ADRENO_CORESIGHT_ATTR(read_ahb_through_dbg, &a6xx_coresight_regs[36]);
2863static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_trace_buf1, &a6xx_coresight_regs[37]);
2864static ADRENO_CORESIGHT_ATTR(cfg_dbgbus_trace_buf2, &a6xx_coresight_regs[38]);
2865static ADRENO_CORESIGHT_ATTR(evt_cfg, &a6xx_coresight_regs[39]);
2866static ADRENO_CORESIGHT_ATTR(evt_intf_sel_0, &a6xx_coresight_regs[40]);
2867static ADRENO_CORESIGHT_ATTR(evt_intf_sel_1, &a6xx_coresight_regs[41]);
2868static ADRENO_CORESIGHT_ATTR(perf_atb_cfg, &a6xx_coresight_regs[42]);
2869static ADRENO_CORESIGHT_ATTR(perf_atb_counter_sel_0, &a6xx_coresight_regs[43]);
2870static ADRENO_CORESIGHT_ATTR(perf_atb_counter_sel_1, &a6xx_coresight_regs[44]);
2871static ADRENO_CORESIGHT_ATTR(perf_atb_counter_sel_2, &a6xx_coresight_regs[45]);
2872static ADRENO_CORESIGHT_ATTR(perf_atb_counter_sel_3, &a6xx_coresight_regs[46]);
2873static ADRENO_CORESIGHT_ATTR(perf_atb_trig_intf_sel_0,
2874 &a6xx_coresight_regs[47]);
2875static ADRENO_CORESIGHT_ATTR(perf_atb_trig_intf_sel_1,
2876 &a6xx_coresight_regs[48]);
2877static ADRENO_CORESIGHT_ATTR(perf_atb_drain_cmd, &a6xx_coresight_regs[49]);
2878static ADRENO_CORESIGHT_ATTR(eco_cntl, &a6xx_coresight_regs[50]);
2879static ADRENO_CORESIGHT_ATTR(ahb_dbg_cntl, &a6xx_coresight_regs[51]);
2880
2881/* CX debug registers */
2882static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_sel_a,
2883 &a6xx_coresight_regs_cx[0]);
2884static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_sel_b,
2885 &a6xx_coresight_regs_cx[1]);
2886static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_sel_c,
2887 &a6xx_coresight_regs_cx[2]);
2888static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_sel_d,
2889 &a6xx_coresight_regs_cx[3]);
2890static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_cntlt,
2891 &a6xx_coresight_regs_cx[4]);
2892static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_cntlm,
2893 &a6xx_coresight_regs_cx[5]);
2894static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_opl,
2895 &a6xx_coresight_regs_cx[6]);
2896static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ope,
2897 &a6xx_coresight_regs_cx[7]);
2898static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivtl_0,
2899 &a6xx_coresight_regs_cx[8]);
2900static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivtl_1,
2901 &a6xx_coresight_regs_cx[9]);
2902static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivtl_2,
2903 &a6xx_coresight_regs_cx[10]);
2904static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivtl_3,
2905 &a6xx_coresight_regs_cx[11]);
2906static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maskl_0,
2907 &a6xx_coresight_regs_cx[12]);
2908static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maskl_1,
2909 &a6xx_coresight_regs_cx[13]);
2910static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maskl_2,
2911 &a6xx_coresight_regs_cx[14]);
2912static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maskl_3,
2913 &a6xx_coresight_regs_cx[15]);
2914static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_bytel_0,
2915 &a6xx_coresight_regs_cx[16]);
2916static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_bytel_1,
2917 &a6xx_coresight_regs_cx[17]);
2918static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivte_0,
2919 &a6xx_coresight_regs_cx[18]);
2920static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivte_1,
2921 &a6xx_coresight_regs_cx[19]);
2922static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivte_2,
2923 &a6xx_coresight_regs_cx[20]);
2924static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ivte_3,
2925 &a6xx_coresight_regs_cx[21]);
2926static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maske_0,
2927 &a6xx_coresight_regs_cx[22]);
2928static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maske_1,
2929 &a6xx_coresight_regs_cx[23]);
2930static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maske_2,
2931 &a6xx_coresight_regs_cx[24]);
2932static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_maske_3,
2933 &a6xx_coresight_regs_cx[25]);
2934static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_nibblee,
2935 &a6xx_coresight_regs_cx[26]);
2936static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ptrc0,
2937 &a6xx_coresight_regs_cx[27]);
2938static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_ptrc1,
2939 &a6xx_coresight_regs_cx[28]);
2940static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_loadreg,
2941 &a6xx_coresight_regs_cx[29]);
2942static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_idx,
2943 &a6xx_coresight_regs_cx[30]);
2944static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_clrc,
2945 &a6xx_coresight_regs_cx[31]);
2946static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_loadivt,
2947 &a6xx_coresight_regs_cx[32]);
2948static ADRENO_CORESIGHT_ATTR(cx_vbif_dbg_cntl,
2949 &a6xx_coresight_regs_cx[33]);
2950static ADRENO_CORESIGHT_ATTR(cx_dbg_lo_hi_gpio,
2951 &a6xx_coresight_regs_cx[34]);
2952static ADRENO_CORESIGHT_ATTR(cx_ext_trace_bus_cntl,
2953 &a6xx_coresight_regs_cx[35]);
2954static ADRENO_CORESIGHT_ATTR(cx_read_ahb_through_dbg,
2955 &a6xx_coresight_regs_cx[36]);
2956static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_trace_buf1,
2957 &a6xx_coresight_regs_cx[37]);
2958static ADRENO_CORESIGHT_ATTR(cx_cfg_dbgbus_trace_buf2,
2959 &a6xx_coresight_regs_cx[38]);
2960static ADRENO_CORESIGHT_ATTR(cx_evt_cfg,
2961 &a6xx_coresight_regs_cx[39]);
2962static ADRENO_CORESIGHT_ATTR(cx_evt_intf_sel_0,
2963 &a6xx_coresight_regs_cx[40]);
2964static ADRENO_CORESIGHT_ATTR(cx_evt_intf_sel_1,
2965 &a6xx_coresight_regs_cx[41]);
2966static ADRENO_CORESIGHT_ATTR(cx_perf_atb_cfg,
2967 &a6xx_coresight_regs_cx[42]);
2968static ADRENO_CORESIGHT_ATTR(cx_perf_atb_counter_sel_0,
2969 &a6xx_coresight_regs_cx[43]);
2970static ADRENO_CORESIGHT_ATTR(cx_perf_atb_counter_sel_1,
2971 &a6xx_coresight_regs_cx[44]);
2972static ADRENO_CORESIGHT_ATTR(cx_perf_atb_counter_sel_2,
2973 &a6xx_coresight_regs_cx[45]);
2974static ADRENO_CORESIGHT_ATTR(cx_perf_atb_counter_sel_3,
2975 &a6xx_coresight_regs_cx[46]);
2976static ADRENO_CORESIGHT_ATTR(cx_perf_atb_trig_intf_sel_0,
2977 &a6xx_coresight_regs_cx[47]);
2978static ADRENO_CORESIGHT_ATTR(cx_perf_atb_trig_intf_sel_1,
2979 &a6xx_coresight_regs_cx[48]);
2980static ADRENO_CORESIGHT_ATTR(cx_perf_atb_drain_cmd,
2981 &a6xx_coresight_regs_cx[49]);
2982static ADRENO_CORESIGHT_ATTR(cx_eco_cntl,
2983 &a6xx_coresight_regs_cx[50]);
2984static ADRENO_CORESIGHT_ATTR(cx_ahb_dbg_cntl,
2985 &a6xx_coresight_regs_cx[51]);
2986
2987static struct attribute *a6xx_coresight_attrs[] = {
2988 &coresight_attr_cfg_dbgbus_sel_a.attr.attr,
2989 &coresight_attr_cfg_dbgbus_sel_b.attr.attr,
2990 &coresight_attr_cfg_dbgbus_sel_c.attr.attr,
2991 &coresight_attr_cfg_dbgbus_sel_d.attr.attr,
2992 &coresight_attr_cfg_dbgbus_cntlt.attr.attr,
2993 &coresight_attr_cfg_dbgbus_cntlm.attr.attr,
2994 &coresight_attr_cfg_dbgbus_opl.attr.attr,
2995 &coresight_attr_cfg_dbgbus_ope.attr.attr,
2996 &coresight_attr_cfg_dbgbus_ivtl_0.attr.attr,
2997 &coresight_attr_cfg_dbgbus_ivtl_1.attr.attr,
2998 &coresight_attr_cfg_dbgbus_ivtl_2.attr.attr,
2999 &coresight_attr_cfg_dbgbus_ivtl_3.attr.attr,
3000 &coresight_attr_cfg_dbgbus_maskl_0.attr.attr,
3001 &coresight_attr_cfg_dbgbus_maskl_1.attr.attr,
3002 &coresight_attr_cfg_dbgbus_maskl_2.attr.attr,
3003 &coresight_attr_cfg_dbgbus_maskl_3.attr.attr,
3004 &coresight_attr_cfg_dbgbus_bytel_0.attr.attr,
3005 &coresight_attr_cfg_dbgbus_bytel_1.attr.attr,
3006 &coresight_attr_cfg_dbgbus_ivte_0.attr.attr,
3007 &coresight_attr_cfg_dbgbus_ivte_1.attr.attr,
3008 &coresight_attr_cfg_dbgbus_ivte_2.attr.attr,
3009 &coresight_attr_cfg_dbgbus_ivte_3.attr.attr,
3010 &coresight_attr_cfg_dbgbus_maske_0.attr.attr,
3011 &coresight_attr_cfg_dbgbus_maske_1.attr.attr,
3012 &coresight_attr_cfg_dbgbus_maske_2.attr.attr,
3013 &coresight_attr_cfg_dbgbus_maske_3.attr.attr,
3014 &coresight_attr_cfg_dbgbus_nibblee.attr.attr,
3015 &coresight_attr_cfg_dbgbus_ptrc0.attr.attr,
3016 &coresight_attr_cfg_dbgbus_ptrc1.attr.attr,
3017 &coresight_attr_cfg_dbgbus_loadreg.attr.attr,
3018 &coresight_attr_cfg_dbgbus_idx.attr.attr,
3019 &coresight_attr_cfg_dbgbus_clrc.attr.attr,
3020 &coresight_attr_cfg_dbgbus_loadivt.attr.attr,
3021 &coresight_attr_vbif_dbg_cntl.attr.attr,
3022 &coresight_attr_dbg_lo_hi_gpio.attr.attr,
3023 &coresight_attr_ext_trace_bus_cntl.attr.attr,
3024 &coresight_attr_read_ahb_through_dbg.attr.attr,
3025 &coresight_attr_cfg_dbgbus_trace_buf1.attr.attr,
3026 &coresight_attr_cfg_dbgbus_trace_buf2.attr.attr,
3027 &coresight_attr_evt_cfg.attr.attr,
3028 &coresight_attr_evt_intf_sel_0.attr.attr,
3029 &coresight_attr_evt_intf_sel_1.attr.attr,
3030 &coresight_attr_perf_atb_cfg.attr.attr,
3031 &coresight_attr_perf_atb_counter_sel_0.attr.attr,
3032 &coresight_attr_perf_atb_counter_sel_1.attr.attr,
3033 &coresight_attr_perf_atb_counter_sel_2.attr.attr,
3034 &coresight_attr_perf_atb_counter_sel_3.attr.attr,
3035 &coresight_attr_perf_atb_trig_intf_sel_0.attr.attr,
3036 &coresight_attr_perf_atb_trig_intf_sel_1.attr.attr,
3037 &coresight_attr_perf_atb_drain_cmd.attr.attr,
3038 &coresight_attr_eco_cntl.attr.attr,
3039 &coresight_attr_ahb_dbg_cntl.attr.attr,
3040 NULL,
3041};
3042
3043/* CX debug bus attributes */
3044static struct attribute *a6xx_coresight_attrs_cx[] = {
3045 &coresight_attr_cx_cfg_dbgbus_sel_a.attr.attr,
3046 &coresight_attr_cx_cfg_dbgbus_sel_b.attr.attr,
3047 &coresight_attr_cx_cfg_dbgbus_sel_c.attr.attr,
3048 &coresight_attr_cx_cfg_dbgbus_sel_d.attr.attr,
3049 &coresight_attr_cx_cfg_dbgbus_cntlt.attr.attr,
3050 &coresight_attr_cx_cfg_dbgbus_cntlm.attr.attr,
3051 &coresight_attr_cx_cfg_dbgbus_opl.attr.attr,
3052 &coresight_attr_cx_cfg_dbgbus_ope.attr.attr,
3053 &coresight_attr_cx_cfg_dbgbus_ivtl_0.attr.attr,
3054 &coresight_attr_cx_cfg_dbgbus_ivtl_1.attr.attr,
3055 &coresight_attr_cx_cfg_dbgbus_ivtl_2.attr.attr,
3056 &coresight_attr_cx_cfg_dbgbus_ivtl_3.attr.attr,
3057 &coresight_attr_cx_cfg_dbgbus_maskl_0.attr.attr,
3058 &coresight_attr_cx_cfg_dbgbus_maskl_1.attr.attr,
3059 &coresight_attr_cx_cfg_dbgbus_maskl_2.attr.attr,
3060 &coresight_attr_cx_cfg_dbgbus_maskl_3.attr.attr,
3061 &coresight_attr_cx_cfg_dbgbus_bytel_0.attr.attr,
3062 &coresight_attr_cx_cfg_dbgbus_bytel_1.attr.attr,
3063 &coresight_attr_cx_cfg_dbgbus_ivte_0.attr.attr,
3064 &coresight_attr_cx_cfg_dbgbus_ivte_1.attr.attr,
3065 &coresight_attr_cx_cfg_dbgbus_ivte_2.attr.attr,
3066 &coresight_attr_cx_cfg_dbgbus_ivte_3.attr.attr,
3067 &coresight_attr_cx_cfg_dbgbus_maske_0.attr.attr,
3068 &coresight_attr_cx_cfg_dbgbus_maske_1.attr.attr,
3069 &coresight_attr_cx_cfg_dbgbus_maske_2.attr.attr,
3070 &coresight_attr_cx_cfg_dbgbus_maske_3.attr.attr,
3071 &coresight_attr_cx_cfg_dbgbus_nibblee.attr.attr,
3072 &coresight_attr_cx_cfg_dbgbus_ptrc0.attr.attr,
3073 &coresight_attr_cx_cfg_dbgbus_ptrc1.attr.attr,
3074 &coresight_attr_cx_cfg_dbgbus_loadreg.attr.attr,
3075 &coresight_attr_cx_cfg_dbgbus_idx.attr.attr,
3076 &coresight_attr_cx_cfg_dbgbus_clrc.attr.attr,
3077 &coresight_attr_cx_cfg_dbgbus_loadivt.attr.attr,
3078 &coresight_attr_cx_vbif_dbg_cntl.attr.attr,
3079 &coresight_attr_cx_dbg_lo_hi_gpio.attr.attr,
3080 &coresight_attr_cx_ext_trace_bus_cntl.attr.attr,
3081 &coresight_attr_cx_read_ahb_through_dbg.attr.attr,
3082 &coresight_attr_cx_cfg_dbgbus_trace_buf1.attr.attr,
3083 &coresight_attr_cx_cfg_dbgbus_trace_buf2.attr.attr,
3084 &coresight_attr_cx_evt_cfg.attr.attr,
3085 &coresight_attr_cx_evt_intf_sel_0.attr.attr,
3086 &coresight_attr_cx_evt_intf_sel_1.attr.attr,
3087 &coresight_attr_cx_perf_atb_cfg.attr.attr,
3088 &coresight_attr_cx_perf_atb_counter_sel_0.attr.attr,
3089 &coresight_attr_cx_perf_atb_counter_sel_1.attr.attr,
3090 &coresight_attr_cx_perf_atb_counter_sel_2.attr.attr,
3091 &coresight_attr_cx_perf_atb_counter_sel_3.attr.attr,
3092 &coresight_attr_cx_perf_atb_trig_intf_sel_0.attr.attr,
3093 &coresight_attr_cx_perf_atb_trig_intf_sel_1.attr.attr,
3094 &coresight_attr_cx_perf_atb_drain_cmd.attr.attr,
3095 &coresight_attr_cx_eco_cntl.attr.attr,
3096 &coresight_attr_cx_ahb_dbg_cntl.attr.attr,
3097 NULL,
3098};
3099
3100static const struct attribute_group a6xx_coresight_group = {
3101 .attrs = a6xx_coresight_attrs,
3102};
3103
3104static const struct attribute_group *a6xx_coresight_groups[] = {
3105 &a6xx_coresight_group,
3106 NULL,
3107};
3108
3109static const struct attribute_group a6xx_coresight_group_cx = {
3110 .attrs = a6xx_coresight_attrs_cx,
3111};
3112
3113static const struct attribute_group *a6xx_coresight_groups_cx[] = {
3114 &a6xx_coresight_group_cx,
3115 NULL,
3116};
3117
3118static struct adreno_coresight a6xx_coresight = {
3119 .registers = a6xx_coresight_regs,
3120 .count = ARRAY_SIZE(a6xx_coresight_regs),
3121 .groups = a6xx_coresight_groups,
3122};
3123
3124static struct adreno_coresight a6xx_coresight_cx = {
3125 .registers = a6xx_coresight_regs_cx,
3126 .count = ARRAY_SIZE(a6xx_coresight_regs_cx),
3127 .groups = a6xx_coresight_groups_cx,
3128};
3129
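/*
 * Per-block performance counter tables. Each entry pairs a 64-bit
 * counter (LO/HI register pair) with the select register that is
 * programmed with the requested countable.
 */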
Lynus Vaz107d2892017-03-01 13:48:06 +05303130static struct adreno_perfcount_register a6xx_perfcounters_cp[] = {
3131 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_0_LO,
3132 A6XX_RBBM_PERFCTR_CP_0_HI, 0, A6XX_CP_PERFCTR_CP_SEL_0 },
3133 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_1_LO,
3134 A6XX_RBBM_PERFCTR_CP_1_HI, 1, A6XX_CP_PERFCTR_CP_SEL_1 },
3135 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_2_LO,
3136 A6XX_RBBM_PERFCTR_CP_2_HI, 2, A6XX_CP_PERFCTR_CP_SEL_2 },
3137 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_3_LO,
3138 A6XX_RBBM_PERFCTR_CP_3_HI, 3, A6XX_CP_PERFCTR_CP_SEL_3 },
3139 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_4_LO,
3140 A6XX_RBBM_PERFCTR_CP_4_HI, 4, A6XX_CP_PERFCTR_CP_SEL_4 },
3141 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_5_LO,
3142 A6XX_RBBM_PERFCTR_CP_5_HI, 5, A6XX_CP_PERFCTR_CP_SEL_5 },
3143 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_6_LO,
3144 A6XX_RBBM_PERFCTR_CP_6_HI, 6, A6XX_CP_PERFCTR_CP_SEL_6 },
3145 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_7_LO,
3146 A6XX_RBBM_PERFCTR_CP_7_HI, 7, A6XX_CP_PERFCTR_CP_SEL_7 },
3147 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_8_LO,
3148 A6XX_RBBM_PERFCTR_CP_8_HI, 8, A6XX_CP_PERFCTR_CP_SEL_8 },
3149 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_9_LO,
3150 A6XX_RBBM_PERFCTR_CP_9_HI, 9, A6XX_CP_PERFCTR_CP_SEL_9 },
3151 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_10_LO,
3152 A6XX_RBBM_PERFCTR_CP_10_HI, 10, A6XX_CP_PERFCTR_CP_SEL_10 },
3153 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_11_LO,
3154 A6XX_RBBM_PERFCTR_CP_11_HI, 11, A6XX_CP_PERFCTR_CP_SEL_11 },
3155 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_12_LO,
3156 A6XX_RBBM_PERFCTR_CP_12_HI, 12, A6XX_CP_PERFCTR_CP_SEL_12 },
3157 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_13_LO,
3158 A6XX_RBBM_PERFCTR_CP_13_HI, 13, A6XX_CP_PERFCTR_CP_SEL_13 },
3159};
3160
3161static struct adreno_perfcount_register a6xx_perfcounters_rbbm[] = {
3162 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_0_LO,
3163 A6XX_RBBM_PERFCTR_RBBM_0_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_0 },
3164 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_1_LO,
3165 A6XX_RBBM_PERFCTR_RBBM_1_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_1 },
3166 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_2_LO,
3167 A6XX_RBBM_PERFCTR_RBBM_2_HI, 16, A6XX_RBBM_PERFCTR_RBBM_SEL_2 },
3168 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_3_LO,
3169 A6XX_RBBM_PERFCTR_RBBM_3_HI, 17, A6XX_RBBM_PERFCTR_RBBM_SEL_3 },
3170};
3171
3172static struct adreno_perfcount_register a6xx_perfcounters_pc[] = {
3173 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_0_LO,
3174 A6XX_RBBM_PERFCTR_PC_0_HI, 18, A6XX_PC_PERFCTR_PC_SEL_0 },
3175 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_1_LO,
3176 A6XX_RBBM_PERFCTR_PC_1_HI, 19, A6XX_PC_PERFCTR_PC_SEL_1 },
3177 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_2_LO,
3178 A6XX_RBBM_PERFCTR_PC_2_HI, 20, A6XX_PC_PERFCTR_PC_SEL_2 },
3179 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_3_LO,
3180 A6XX_RBBM_PERFCTR_PC_3_HI, 21, A6XX_PC_PERFCTR_PC_SEL_3 },
3181 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_4_LO,
3182 A6XX_RBBM_PERFCTR_PC_4_HI, 22, A6XX_PC_PERFCTR_PC_SEL_4 },
3183 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_5_LO,
3184 A6XX_RBBM_PERFCTR_PC_5_HI, 23, A6XX_PC_PERFCTR_PC_SEL_5 },
3185 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_6_LO,
3186 A6XX_RBBM_PERFCTR_PC_6_HI, 24, A6XX_PC_PERFCTR_PC_SEL_6 },
3187 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_7_LO,
3188 A6XX_RBBM_PERFCTR_PC_7_HI, 25, A6XX_PC_PERFCTR_PC_SEL_7 },
3189};
3190
3191static struct adreno_perfcount_register a6xx_perfcounters_vfd[] = {
3192 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_0_LO,
3193 A6XX_RBBM_PERFCTR_VFD_0_HI, 26, A6XX_VFD_PERFCTR_VFD_SEL_0 },
3194 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_1_LO,
3195 A6XX_RBBM_PERFCTR_VFD_1_HI, 27, A6XX_VFD_PERFCTR_VFD_SEL_1 },
3196 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_2_LO,
3197 A6XX_RBBM_PERFCTR_VFD_2_HI, 28, A6XX_VFD_PERFCTR_VFD_SEL_2 },
3198 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_3_LO,
3199 A6XX_RBBM_PERFCTR_VFD_3_HI, 29, A6XX_VFD_PERFCTR_VFD_SEL_3 },
3200 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_4_LO,
3201 A6XX_RBBM_PERFCTR_VFD_4_HI, 30, A6XX_VFD_PERFCTR_VFD_SEL_4 },
3202 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_5_LO,
3203 A6XX_RBBM_PERFCTR_VFD_5_HI, 31, A6XX_VFD_PERFCTR_VFD_SEL_5 },
3204 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_6_LO,
3205 A6XX_RBBM_PERFCTR_VFD_6_HI, 32, A6XX_VFD_PERFCTR_VFD_SEL_6 },
3206 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_7_LO,
3207 A6XX_RBBM_PERFCTR_VFD_7_HI, 33, A6XX_VFD_PERFCTR_VFD_SEL_7 },
3208};
3209
3210static struct adreno_perfcount_register a6xx_perfcounters_hlsq[] = {
3211 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_0_LO,
3212 A6XX_RBBM_PERFCTR_HLSQ_0_HI, 34, A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
3213 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_1_LO,
3214 A6XX_RBBM_PERFCTR_HLSQ_1_HI, 35, A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
3215 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_2_LO,
3216 A6XX_RBBM_PERFCTR_HLSQ_2_HI, 36, A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
3217 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_3_LO,
3218 A6XX_RBBM_PERFCTR_HLSQ_3_HI, 37, A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
3219 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_4_LO,
3220 A6XX_RBBM_PERFCTR_HLSQ_4_HI, 38, A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
3221 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_5_LO,
3222 A6XX_RBBM_PERFCTR_HLSQ_5_HI, 39, A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
3223};
3224
3225static struct adreno_perfcount_register a6xx_perfcounters_vpc[] = {
3226 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_0_LO,
3227 A6XX_RBBM_PERFCTR_VPC_0_HI, 40, A6XX_VPC_PERFCTR_VPC_SEL_0 },
3228 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_1_LO,
3229 A6XX_RBBM_PERFCTR_VPC_1_HI, 41, A6XX_VPC_PERFCTR_VPC_SEL_1 },
3230 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_2_LO,
3231 A6XX_RBBM_PERFCTR_VPC_2_HI, 42, A6XX_VPC_PERFCTR_VPC_SEL_2 },
3232 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_3_LO,
3233 A6XX_RBBM_PERFCTR_VPC_3_HI, 43, A6XX_VPC_PERFCTR_VPC_SEL_3 },
3234 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_4_LO,
3235 A6XX_RBBM_PERFCTR_VPC_4_HI, 44, A6XX_VPC_PERFCTR_VPC_SEL_4 },
3236 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_5_LO,
3237 A6XX_RBBM_PERFCTR_VPC_5_HI, 45, A6XX_VPC_PERFCTR_VPC_SEL_5 },
3238};
3239
3240static struct adreno_perfcount_register a6xx_perfcounters_ccu[] = {
3241 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_0_LO,
3242 A6XX_RBBM_PERFCTR_CCU_0_HI, 46, A6XX_RB_PERFCTR_CCU_SEL_0 },
3243 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_1_LO,
3244 A6XX_RBBM_PERFCTR_CCU_1_HI, 47, A6XX_RB_PERFCTR_CCU_SEL_1 },
3245 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_2_LO,
3246 A6XX_RBBM_PERFCTR_CCU_2_HI, 48, A6XX_RB_PERFCTR_CCU_SEL_2 },
3247 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_3_LO,
3248 A6XX_RBBM_PERFCTR_CCU_3_HI, 49, A6XX_RB_PERFCTR_CCU_SEL_3 },
3249 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_4_LO,
3250 A6XX_RBBM_PERFCTR_CCU_4_HI, 50, A6XX_RB_PERFCTR_CCU_SEL_4 },
3251};
3252
3253static struct adreno_perfcount_register a6xx_perfcounters_tse[] = {
3254 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_0_LO,
3255 A6XX_RBBM_PERFCTR_TSE_0_HI, 51, A6XX_GRAS_PERFCTR_TSE_SEL_0 },
3256 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_1_LO,
3257 A6XX_RBBM_PERFCTR_TSE_1_HI, 52, A6XX_GRAS_PERFCTR_TSE_SEL_1 },
3258 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_2_LO,
3259 A6XX_RBBM_PERFCTR_TSE_2_HI, 53, A6XX_GRAS_PERFCTR_TSE_SEL_2 },
3260 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_3_LO,
3261 A6XX_RBBM_PERFCTR_TSE_3_HI, 54, A6XX_GRAS_PERFCTR_TSE_SEL_3 },
3262};
3263
3264static struct adreno_perfcount_register a6xx_perfcounters_ras[] = {
3265 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_0_LO,
3266 A6XX_RBBM_PERFCTR_RAS_0_HI, 55, A6XX_GRAS_PERFCTR_RAS_SEL_0 },
3267 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_1_LO,
3268 A6XX_RBBM_PERFCTR_RAS_1_HI, 56, A6XX_GRAS_PERFCTR_RAS_SEL_1 },
3269 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_2_LO,
3270 A6XX_RBBM_PERFCTR_RAS_2_HI, 57, A6XX_GRAS_PERFCTR_RAS_SEL_2 },
3271 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_3_LO,
3272 A6XX_RBBM_PERFCTR_RAS_3_HI, 58, A6XX_GRAS_PERFCTR_RAS_SEL_3 },
3273};
3274
3275static struct adreno_perfcount_register a6xx_perfcounters_uche[] = {
3276 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_0_LO,
3277 A6XX_RBBM_PERFCTR_UCHE_0_HI, 59, A6XX_UCHE_PERFCTR_UCHE_SEL_0 },
3278 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_1_LO,
3279 A6XX_RBBM_PERFCTR_UCHE_1_HI, 60, A6XX_UCHE_PERFCTR_UCHE_SEL_1 },
3280 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_2_LO,
3281 A6XX_RBBM_PERFCTR_UCHE_2_HI, 61, A6XX_UCHE_PERFCTR_UCHE_SEL_2 },
3282 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_3_LO,
3283 A6XX_RBBM_PERFCTR_UCHE_3_HI, 62, A6XX_UCHE_PERFCTR_UCHE_SEL_3 },
3284 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_4_LO,
3285 A6XX_RBBM_PERFCTR_UCHE_4_HI, 63, A6XX_UCHE_PERFCTR_UCHE_SEL_4 },
3286 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_5_LO,
3287 A6XX_RBBM_PERFCTR_UCHE_5_HI, 64, A6XX_UCHE_PERFCTR_UCHE_SEL_5 },
3288 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_6_LO,
3289 A6XX_RBBM_PERFCTR_UCHE_6_HI, 65, A6XX_UCHE_PERFCTR_UCHE_SEL_6 },
3290 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_7_LO,
3291 A6XX_RBBM_PERFCTR_UCHE_7_HI, 66, A6XX_UCHE_PERFCTR_UCHE_SEL_7 },
3292 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_8_LO,
3293 A6XX_RBBM_PERFCTR_UCHE_8_HI, 67, A6XX_UCHE_PERFCTR_UCHE_SEL_8 },
3294 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_9_LO,
3295 A6XX_RBBM_PERFCTR_UCHE_9_HI, 68, A6XX_UCHE_PERFCTR_UCHE_SEL_9 },
3296 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_10_LO,
3297 A6XX_RBBM_PERFCTR_UCHE_10_HI, 69,
3298 A6XX_UCHE_PERFCTR_UCHE_SEL_10 },
3299 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_11_LO,
3300 A6XX_RBBM_PERFCTR_UCHE_11_HI, 70,
3301 A6XX_UCHE_PERFCTR_UCHE_SEL_11 },
3302};
3303
3304static struct adreno_perfcount_register a6xx_perfcounters_tp[] = {
3305 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_0_LO,
3306 A6XX_RBBM_PERFCTR_TP_0_HI, 71, A6XX_TPL1_PERFCTR_TP_SEL_0 },
3307 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_1_LO,
3308 A6XX_RBBM_PERFCTR_TP_1_HI, 72, A6XX_TPL1_PERFCTR_TP_SEL_1 },
3309 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_2_LO,
3310 A6XX_RBBM_PERFCTR_TP_2_HI, 73, A6XX_TPL1_PERFCTR_TP_SEL_2 },
3311 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_3_LO,
3312 A6XX_RBBM_PERFCTR_TP_3_HI, 74, A6XX_TPL1_PERFCTR_TP_SEL_3 },
3313 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_4_LO,
3314 A6XX_RBBM_PERFCTR_TP_4_HI, 75, A6XX_TPL1_PERFCTR_TP_SEL_4 },
3315 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_5_LO,
3316 A6XX_RBBM_PERFCTR_TP_5_HI, 76, A6XX_TPL1_PERFCTR_TP_SEL_5 },
3317 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_6_LO,
3318 A6XX_RBBM_PERFCTR_TP_6_HI, 77, A6XX_TPL1_PERFCTR_TP_SEL_6 },
3319 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_7_LO,
3320 A6XX_RBBM_PERFCTR_TP_7_HI, 78, A6XX_TPL1_PERFCTR_TP_SEL_7 },
3321 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_8_LO,
3322 A6XX_RBBM_PERFCTR_TP_8_HI, 79, A6XX_TPL1_PERFCTR_TP_SEL_8 },
3323 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_9_LO,
3324 A6XX_RBBM_PERFCTR_TP_9_HI, 80, A6XX_TPL1_PERFCTR_TP_SEL_9 },
3325 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_10_LO,
3326 A6XX_RBBM_PERFCTR_TP_10_HI, 81, A6XX_TPL1_PERFCTR_TP_SEL_10 },
3327 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_11_LO,
3328 A6XX_RBBM_PERFCTR_TP_11_HI, 82, A6XX_TPL1_PERFCTR_TP_SEL_11 },
3329};
3330
3331static struct adreno_perfcount_register a6xx_perfcounters_sp[] = {
3332 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_0_LO,
3333 A6XX_RBBM_PERFCTR_SP_0_HI, 83, A6XX_SP_PERFCTR_SP_SEL_0 },
3334 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_1_LO,
3335 A6XX_RBBM_PERFCTR_SP_1_HI, 84, A6XX_SP_PERFCTR_SP_SEL_1 },
3336 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_2_LO,
3337 A6XX_RBBM_PERFCTR_SP_2_HI, 85, A6XX_SP_PERFCTR_SP_SEL_2 },
3338 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_3_LO,
3339 A6XX_RBBM_PERFCTR_SP_3_HI, 86, A6XX_SP_PERFCTR_SP_SEL_3 },
3340 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_4_LO,
3341 A6XX_RBBM_PERFCTR_SP_4_HI, 87, A6XX_SP_PERFCTR_SP_SEL_4 },
3342 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_5_LO,
3343 A6XX_RBBM_PERFCTR_SP_5_HI, 88, A6XX_SP_PERFCTR_SP_SEL_5 },
3344 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_6_LO,
3345 A6XX_RBBM_PERFCTR_SP_6_HI, 89, A6XX_SP_PERFCTR_SP_SEL_6 },
3346 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_7_LO,
3347 A6XX_RBBM_PERFCTR_SP_7_HI, 90, A6XX_SP_PERFCTR_SP_SEL_7 },
3348 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_8_LO,
3349 A6XX_RBBM_PERFCTR_SP_8_HI, 91, A6XX_SP_PERFCTR_SP_SEL_8 },
3350 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_9_LO,
3351 A6XX_RBBM_PERFCTR_SP_9_HI, 92, A6XX_SP_PERFCTR_SP_SEL_9 },
3352 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_10_LO,
3353 A6XX_RBBM_PERFCTR_SP_10_HI, 93, A6XX_SP_PERFCTR_SP_SEL_10 },
3354 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_11_LO,
3355 A6XX_RBBM_PERFCTR_SP_11_HI, 94, A6XX_SP_PERFCTR_SP_SEL_11 },
3356 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_12_LO,
3357 A6XX_RBBM_PERFCTR_SP_12_HI, 95, A6XX_SP_PERFCTR_SP_SEL_12 },
3358 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_13_LO,
3359 A6XX_RBBM_PERFCTR_SP_13_HI, 96, A6XX_SP_PERFCTR_SP_SEL_13 },
3360 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_14_LO,
3361 A6XX_RBBM_PERFCTR_SP_14_HI, 97, A6XX_SP_PERFCTR_SP_SEL_14 },
3362 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_15_LO,
3363 A6XX_RBBM_PERFCTR_SP_15_HI, 98, A6XX_SP_PERFCTR_SP_SEL_15 },
3364 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_16_LO,
3365 A6XX_RBBM_PERFCTR_SP_16_HI, 99, A6XX_SP_PERFCTR_SP_SEL_16 },
3366 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_17_LO,
3367 A6XX_RBBM_PERFCTR_SP_17_HI, 100, A6XX_SP_PERFCTR_SP_SEL_17 },
3368 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_18_LO,
3369 A6XX_RBBM_PERFCTR_SP_18_HI, 101, A6XX_SP_PERFCTR_SP_SEL_18 },
3370 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_19_LO,
3371 A6XX_RBBM_PERFCTR_SP_19_HI, 102, A6XX_SP_PERFCTR_SP_SEL_19 },
3372 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_20_LO,
3373 A6XX_RBBM_PERFCTR_SP_20_HI, 103, A6XX_SP_PERFCTR_SP_SEL_20 },
3374 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_21_LO,
3375 A6XX_RBBM_PERFCTR_SP_21_HI, 104, A6XX_SP_PERFCTR_SP_SEL_21 },
3376 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_22_LO,
3377 A6XX_RBBM_PERFCTR_SP_22_HI, 105, A6XX_SP_PERFCTR_SP_SEL_22 },
3378 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_23_LO,
3379 A6XX_RBBM_PERFCTR_SP_23_HI, 106, A6XX_SP_PERFCTR_SP_SEL_23 },
3380};
3381
3382static struct adreno_perfcount_register a6xx_perfcounters_rb[] = {
3383 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_0_LO,
3384 A6XX_RBBM_PERFCTR_RB_0_HI, 107, A6XX_RB_PERFCTR_RB_SEL_0 },
3385 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_1_LO,
3386 A6XX_RBBM_PERFCTR_RB_1_HI, 108, A6XX_RB_PERFCTR_RB_SEL_1 },
3387 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_2_LO,
3388 A6XX_RBBM_PERFCTR_RB_2_HI, 109, A6XX_RB_PERFCTR_RB_SEL_2 },
3389 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_3_LO,
3390 A6XX_RBBM_PERFCTR_RB_3_HI, 110, A6XX_RB_PERFCTR_RB_SEL_3 },
3391 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_4_LO,
3392 A6XX_RBBM_PERFCTR_RB_4_HI, 111, A6XX_RB_PERFCTR_RB_SEL_4 },
3393 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_5_LO,
3394 A6XX_RBBM_PERFCTR_RB_5_HI, 112, A6XX_RB_PERFCTR_RB_SEL_5 },
3395 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_6_LO,
3396 A6XX_RBBM_PERFCTR_RB_6_HI, 113, A6XX_RB_PERFCTR_RB_SEL_6 },
3397 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_7_LO,
3398 A6XX_RBBM_PERFCTR_RB_7_HI, 114, A6XX_RB_PERFCTR_RB_SEL_7 },
3399};
3400
3401static struct adreno_perfcount_register a6xx_perfcounters_vsc[] = {
3402 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_0_LO,
3403 A6XX_RBBM_PERFCTR_VSC_0_HI, 115, A6XX_VSC_PERFCTR_VSC_SEL_0 },
3404 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_1_LO,
3405 A6XX_RBBM_PERFCTR_VSC_1_HI, 116, A6XX_VSC_PERFCTR_VSC_SEL_1 },
3406};
3407
3408static struct adreno_perfcount_register a6xx_perfcounters_lrz[] = {
3409 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_0_LO,
3410 A6XX_RBBM_PERFCTR_LRZ_0_HI, 117, A6XX_GRAS_PERFCTR_LRZ_SEL_0 },
3411 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_1_LO,
3412 A6XX_RBBM_PERFCTR_LRZ_1_HI, 118, A6XX_GRAS_PERFCTR_LRZ_SEL_1 },
3413 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_2_LO,
3414 A6XX_RBBM_PERFCTR_LRZ_2_HI, 119, A6XX_GRAS_PERFCTR_LRZ_SEL_2 },
3415 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_3_LO,
3416 A6XX_RBBM_PERFCTR_LRZ_3_HI, 120, A6XX_GRAS_PERFCTR_LRZ_SEL_3 },
3417};
3418
3419static struct adreno_perfcount_register a6xx_perfcounters_cmp[] = {
3420 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_0_LO,
3421 A6XX_RBBM_PERFCTR_CMP_0_HI, 121, A6XX_RB_PERFCTR_CMP_SEL_0 },
3422 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_1_LO,
3423 A6XX_RBBM_PERFCTR_CMP_1_HI, 122, A6XX_RB_PERFCTR_CMP_SEL_1 },
3424 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_2_LO,
3425 A6XX_RBBM_PERFCTR_CMP_2_HI, 123, A6XX_RB_PERFCTR_CMP_SEL_2 },
3426 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_3_LO,
3427 A6XX_RBBM_PERFCTR_CMP_3_HI, 124, A6XX_RB_PERFCTR_CMP_SEL_3 },
3428};
3429
3430static struct adreno_perfcount_register a6xx_perfcounters_vbif[] = {
3431 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW0,
3432 A6XX_VBIF_PERF_CNT_HIGH0, -1, A6XX_VBIF_PERF_CNT_SEL0 },
3433 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW1,
3434 A6XX_VBIF_PERF_CNT_HIGH1, -1, A6XX_VBIF_PERF_CNT_SEL1 },
3435 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW2,
3436 A6XX_VBIF_PERF_CNT_HIGH2, -1, A6XX_VBIF_PERF_CNT_SEL2 },
3437 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW3,
3438 A6XX_VBIF_PERF_CNT_HIGH3, -1, A6XX_VBIF_PERF_CNT_SEL3 },
3439};
3440
3441static struct adreno_perfcount_register a6xx_perfcounters_vbif_pwr[] = {
3442 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW0,
3443 A6XX_VBIF_PERF_PWR_CNT_HIGH0, -1, A6XX_VBIF_PERF_PWR_CNT_EN0 },
3444 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW1,
3445 A6XX_VBIF_PERF_PWR_CNT_HIGH1, -1, A6XX_VBIF_PERF_PWR_CNT_EN1 },
3446 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW2,
3447 A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
3448};
3449
3451static struct adreno_perfcount_register a6xx_perfcounters_gbif[] = {
3452 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW0,
3453 A6XX_GBIF_PERF_CNT_HIGH0, -1, A6XX_GBIF_PERF_CNT_SEL },
3454 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW1,
3455 A6XX_GBIF_PERF_CNT_HIGH1, -1, A6XX_GBIF_PERF_CNT_SEL },
3456 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW2,
3457 A6XX_GBIF_PERF_CNT_HIGH2, -1, A6XX_GBIF_PERF_CNT_SEL },
3458 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW3,
3459 A6XX_GBIF_PERF_CNT_HIGH3, -1, A6XX_GBIF_PERF_CNT_SEL },
3460};
3461
3462static struct adreno_perfcount_register a6xx_perfcounters_gbif_pwr[] = {
3463 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW0,
3464 A6XX_GBIF_PWR_CNT_HIGH0, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
3465 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW1,
3466 A6XX_GBIF_PWR_CNT_HIGH1, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
3467 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW2,
3468 A6XX_GBIF_PWR_CNT_HIGH2, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
3469};
3470
Lynus Vaz856ca602017-05-24 16:56:36 +05303471static struct adreno_perfcount_register a6xx_perfcounters_pwr[] = {
3472 { KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
3473 { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
3474 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
3475 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1, 0 },
3476};
3477
Lynus Vaz107d2892017-03-01 13:48:06 +05303478static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
3479 { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
3480 A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
3481};
3482
Lynus Vaz4fc97e22017-06-01 20:03:35 +05303483static struct adreno_perfcount_register a6xx_pwrcounters_gpmu[] = {
3484 /*
3485 * A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0 is used for the GPU
3486 * busy count (see the PWR group above). Mark it as broken
3487 * so it's not re-used.
3488 */
3489 { KGSL_PERFCOUNTER_BROKEN, 0, 0,
3490 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
3491 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1,
3492 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
3493 { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
3494 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L,
3495 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H, -1,
3496 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
3497 { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
3498 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L,
3499 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H, -1,
3500 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
3501 { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
3502 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L,
3503 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H, -1,
3504 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
3505 { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
3506 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L,
3507 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H, -1,
3508 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
3509 { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
3510 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L,
3511 A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H, -1,
3512 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
3513};
3514
Tarun Karra1382e512017-10-30 19:41:25 -07003515/*
3516 * The ADRENO_PERFCOUNTER_GROUP_RESTORE flag is enabled by default
3517 * because most of the perfcounter groups need to be restored as
3518 * part of preemption and IFPC. Groups that do not need to be
3519 * restored should be defined using the
3520 * A6XX_PERFCOUNTER_GROUP_FLAGS macro instead.
3521 */
Lynus Vaz107d2892017-03-01 13:48:06 +05303522#define A6XX_PERFCOUNTER_GROUP(offset, name) \
Tarun Karra1382e512017-10-30 19:41:25 -07003523 ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, \
3524 ADRENO_PERFCOUNTER_GROUP_RESTORE)
Lynus Vaz107d2892017-03-01 13:48:06 +05303525
3526#define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
3527 ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
3528
Lynus Vaz4fc97e22017-06-01 20:03:35 +05303529#define A6XX_POWER_COUNTER_GROUP(offset, name) \
3530 ADRENO_POWER_COUNTER_GROUP(a6xx, offset, name)
3531
Lynus Vaz107d2892017-03-01 13:48:06 +05303532static struct adreno_perfcount_group a6xx_perfcounter_groups
3533 [KGSL_PERFCOUNTER_GROUP_MAX] = {
3534 A6XX_PERFCOUNTER_GROUP(CP, cp),
Tarun Karra1382e512017-10-30 19:41:25 -07003535 A6XX_PERFCOUNTER_GROUP_FLAGS(RBBM, rbbm, 0),
Lynus Vaz107d2892017-03-01 13:48:06 +05303536 A6XX_PERFCOUNTER_GROUP(PC, pc),
3537 A6XX_PERFCOUNTER_GROUP(VFD, vfd),
3538 A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq),
3539 A6XX_PERFCOUNTER_GROUP(VPC, vpc),
3540 A6XX_PERFCOUNTER_GROUP(CCU, ccu),
3541 A6XX_PERFCOUNTER_GROUP(CMP, cmp),
3542 A6XX_PERFCOUNTER_GROUP(TSE, tse),
3543 A6XX_PERFCOUNTER_GROUP(RAS, ras),
3544 A6XX_PERFCOUNTER_GROUP(LRZ, lrz),
3545 A6XX_PERFCOUNTER_GROUP(UCHE, uche),
3546 A6XX_PERFCOUNTER_GROUP(TP, tp),
3547 A6XX_PERFCOUNTER_GROUP(SP, sp),
3548 A6XX_PERFCOUNTER_GROUP(RB, rb),
3549 A6XX_PERFCOUNTER_GROUP(VSC, vsc),
Tarun Karra1382e512017-10-30 19:41:25 -07003550 A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF, vbif, 0),
Lynus Vaz107d2892017-03-01 13:48:06 +05303551 A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
3552 ADRENO_PERFCOUNTER_GROUP_FIXED),
Lynus Vaz856ca602017-05-24 16:56:36 +05303553 A6XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
3554 ADRENO_PERFCOUNTER_GROUP_FIXED),
Lynus Vaz107d2892017-03-01 13:48:06 +05303555 A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
3556 ADRENO_PERFCOUNTER_GROUP_FIXED),
Lynus Vaz4fc97e22017-06-01 20:03:35 +05303557 A6XX_POWER_COUNTER_GROUP(GPMU, gpmu),
Lynus Vaz107d2892017-03-01 13:48:06 +05303558};
3559
3560static struct adreno_perfcounters a6xx_perfcounters = {
3561 a6xx_perfcounter_groups,
3562 ARRAY_SIZE(a6xx_perfcounter_groups),
3563};
3564
Lynus Vaz856ca602017-05-24 16:56:36 +05303565/* Program the GMU power counter to count GPU busy cycles */
3566static int a6xx_enable_pwr_counters(struct adreno_device *adreno_dev,
3567 unsigned int counter)
3568{
3569 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
3570
3571 /*
3572	 * We have a limited number of power counters. Since we do not use the
3573	 * total GPU cycle counter (counter 0), return an error if it is requested.
3574 */
3575 if (counter == 0)
3576 return -EINVAL;
3577
3578 if (!device->gmu.pdev)
3579 return -ENODEV;
3580
Kyle Piefer50af7d02017-07-25 11:00:17 -07003581 kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xFF000000);
Lynus Vaz856ca602017-05-24 16:56:36 +05303582 kgsl_regrmw(device,
Kyle Piefer50af7d02017-07-25 11:00:17 -07003583 A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xFF, 0x20);
Lynus Vaz856ca602017-05-24 16:56:36 +05303584 kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
3585
3586 return 0;
3587}
3588
Rajesh Kemisetti10bbec92017-10-20 10:55:58 +05303589static void a6xx_efuse_speed_bin(struct adreno_device *adreno_dev)
3590{
3591 unsigned int val;
3592 unsigned int speed_bin[3];
3593 struct kgsl_device *device = &adreno_dev->dev;
3594
3595 if (of_property_read_u32_array(device->pdev->dev.of_node,
3596 "qcom,gpu-speed-bin", speed_bin, 3))
3597 return;
3598
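	/*
	 * The qcom,gpu-speed-bin property supplies <fuse offset, mask, shift>:
	 * read the fuse word at speed_bin[0], mask it with speed_bin[1], then
	 * shift right by speed_bin[2] to extract the bin, e.g. (hypothetical
	 * values) qcom,gpu-speed-bin = <0x41a0 0x1fe00000 21>;
	 */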
3599 adreno_efuse_read_u32(adreno_dev, speed_bin[0], &val);
3600
3601 adreno_dev->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
3602}
3603
3604static const struct {
3605 int (*check)(struct adreno_device *adreno_dev);
3606 void (*func)(struct adreno_device *adreno_dev);
3607} a6xx_efuse_funcs[] = {
3608 { adreno_is_a615, a6xx_efuse_speed_bin },
Deepak Kumar5287eea2018-03-17 14:33:05 +05303609 { adreno_is_a616, a6xx_efuse_speed_bin },
Rajesh Kemisetti10bbec92017-10-20 10:55:58 +05303610};
3611
3612static void a6xx_check_features(struct adreno_device *adreno_dev)
3613{
3614 unsigned int i;
3615
3616 if (adreno_efuse_map(adreno_dev))
3617 return;
3618 for (i = 0; i < ARRAY_SIZE(a6xx_efuse_funcs); i++) {
3619 if (a6xx_efuse_funcs[i].check(adreno_dev))
3620 a6xx_efuse_funcs[i].func(adreno_dev);
3621 }
3622
3623 adreno_efuse_unmap(adreno_dev);
3624}

Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05303625static void a6xx_platform_setup(struct adreno_device *adreno_dev)
3626{
3627 uint64_t addr;
3628 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
3629
3630 /* Calculate SP local and private mem addresses */
3631 addr = ALIGN(ADRENO_UCHE_GMEM_BASE + adreno_dev->gmem_size, SZ_64K);
3632 adreno_dev->sp_local_gpuaddr = addr;
3633 adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
3634
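	/*
	 * GBIF-based targets route their bus counters through the GBIF block,
	 * so point the VBIF and VBIF_PWR groups at the GBIF tables and use the
	 * GBIF halt mask instead of the VBIF one.
	 */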
3635 if (adreno_has_gbif(adreno_dev)) {
3636 a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF].regs =
3637 a6xx_perfcounters_gbif;
3638 a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF].reg_count
3639 = ARRAY_SIZE(a6xx_perfcounters_gbif);
3640
3641 a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs =
3642 a6xx_perfcounters_gbif_pwr;
Deepak Kumar84b9e032017-11-08 13:08:50 +05303643 a6xx_perfcounter_groups[
3644 KGSL_PERFCOUNTER_GROUP_VBIF_PWR].reg_count
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05303645 = ARRAY_SIZE(a6xx_perfcounters_gbif_pwr);
3646
3647 gpudev->vbif_xin_halt_ctrl0_mask =
3648 A6XX_GBIF_HALT_MASK;
3649	} else {
3650		gpudev->vbif_xin_halt_ctrl0_mask =
3651			A6XX_VBIF_XIN_HALT_CTRL0_MASK;
	}
Rajesh Kemisetti10bbec92017-10-20 10:55:58 +05303652
3653	/* Check efuse bits for various capabilities */
3654 a6xx_check_features(adreno_dev);
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05303655}
3656
3657
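/*
 * Emit CP_EVENT_WRITE packets that invalidate the CCU depth and color
 * caches; returns the number of dwords written into the command buffer.
 */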
Harshdeep Dhatt6ba7a942017-08-21 17:53:52 -06003658static unsigned int a6xx_ccu_invalidate(struct adreno_device *adreno_dev,
3659 unsigned int *cmds)
3660{
3661 /* CCU_INVALIDATE_DEPTH */
3662 *cmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
3663 *cmds++ = 24;
3664
3665 /* CCU_INVALIDATE_COLOR */
3666 *cmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
3667 *cmds++ = 25;
3668
3669 return 4;
3670}
3671
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003672/* Register offset defines for A6XX, in order of enum adreno_regs */
3673static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
3674
3675 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A6XX_CP_RB_BASE),
Shrenuj Bansal41665402016-12-16 15:25:54 -08003676 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A6XX_CP_RB_BASE_HI),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003677 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
3678 A6XX_CP_RB_RPTR_ADDR_LO),
3679 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
3680 A6XX_CP_RB_RPTR_ADDR_HI),
3681 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A6XX_CP_RB_RPTR),
3682 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A6XX_CP_RB_WPTR),
3683 ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A6XX_CP_RB_CNTL),
Shrenuj Bansal41665402016-12-16 15:25:54 -08003684 ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A6XX_CP_SQE_CNTL),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003685 ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A6XX_CP_MISC_CNTL),
Carter Cooper8567af02017-03-15 14:22:03 -06003686 ADRENO_REG_DEFINE(ADRENO_REG_CP_HW_FAULT, A6XX_CP_HW_FAULT),
Shrenuj Bansal41665402016-12-16 15:25:54 -08003687 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A6XX_CP_IB1_BASE),
3688 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE_HI, A6XX_CP_IB1_BASE_HI),
3689 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A6XX_CP_IB1_REM_SIZE),
3690 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A6XX_CP_IB2_BASE),
3691 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE_HI, A6XX_CP_IB2_BASE_HI),
3692 ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A6XX_CP_IB2_REM_SIZE),
3693 ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_ADDR, A6XX_CP_ROQ_DBG_ADDR),
3694 ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_DATA, A6XX_CP_ROQ_DBG_DATA),
Harshdeep Dhatt0cdc8992017-05-31 15:44:05 -06003695 ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT, A6XX_CP_CONTEXT_SWITCH_CNTL),
3696 ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
3697 A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO),
3698 ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
3699 A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI),
Harshdeep Dhatt59a69572017-11-01 14:46:13 -06003700 ADRENO_REG_DEFINE(
3701 ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
3702 A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO),
3703 ADRENO_REG_DEFINE(
3704 ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
3705 A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI),
3706 ADRENO_REG_DEFINE(
3707 ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
3708 A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO),
3709 ADRENO_REG_DEFINE(
3710 ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
3711 A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI),
3712 ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
3713 A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO),
3714 ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
3715 A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI),
Harshdeep Dhatt003f6cf2017-12-14 11:00:22 -07003716 ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT_LEVEL_STATUS,
3717 A6XX_CP_CONTEXT_SWITCH_LEVEL_STATUS),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003718 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
3719 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),
Lynus Vaz107d2892017-03-01 13:48:06 +05303720 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL),
3721 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
3722 A6XX_RBBM_PERFCTR_LOAD_CMD0),
3723 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
3724 A6XX_RBBM_PERFCTR_LOAD_CMD1),
3725 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
3726 A6XX_RBBM_PERFCTR_LOAD_CMD2),
3727 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD3,
3728 A6XX_RBBM_PERFCTR_LOAD_CMD3),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003729
3730 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A6XX_RBBM_INT_0_MASK),
3731 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A6XX_RBBM_INT_0_STATUS),
3732 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_CLOCK_CTL, A6XX_RBBM_CLOCK_CNTL),
3733 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_CLEAR_CMD,
3734 A6XX_RBBM_INT_CLEAR_CMD),
3735 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A6XX_RBBM_SW_RESET_CMD),
3736 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD,
3737 A6XX_RBBM_BLOCK_SW_RESET_CMD),
3738 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
3739 A6XX_RBBM_BLOCK_SW_RESET_CMD2),
Lynus Vaz107d2892017-03-01 13:48:06 +05303740 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
3741 A6XX_RBBM_PERFCTR_LOAD_VALUE_LO),
3742 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
3743 A6XX_RBBM_PERFCTR_LOAD_VALUE_HI),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003744 ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION, A6XX_VBIF_VERSION),
Carter Cooperafc85912017-03-20 09:39:18 -06003745 ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL0,
3746 A6XX_VBIF_XIN_HALT_CTRL0),
3747 ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
3748 A6XX_VBIF_XIN_HALT_CTRL1),
Rajesh Kemisettid1ca9542017-10-18 15:35:41 +05303749 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_GPR0_CNTL, A6XX_RBBM_GPR0_CNTL),
3750 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_VBIF_GX_RESET_STATUS,
3751 A6XX_RBBM_VBIF_GX_RESET_STATUS),
Rajesh Kemisetti77b82ed2017-09-24 20:42:41 +05303752 ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT, A6XX_GBIF_HALT),
3753 ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT_ACK, A6XX_GBIF_HALT_ACK),
Kyle Pieferb1027b02017-02-10 13:58:58 -08003754 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
3755 A6XX_GMU_ALWAYS_ON_COUNTER_L),
3756 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
3757 A6XX_GMU_ALWAYS_ON_COUNTER_H),
Kyle Pieferda0fa542017-08-04 13:39:40 -07003758 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
3759 A6XX_GMU_AO_AHB_FENCE_CTRL),
Kyle Pieferb1027b02017-02-10 13:58:58 -08003760 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_INTERRUPT_EN,
3761 A6XX_GMU_AO_INTERRUPT_EN),
Kyle Piefere7b06b42017-04-06 13:53:01 -07003762 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
3763 A6XX_GMU_AO_HOST_INTERRUPT_CLR),
3764 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
3765 A6XX_GMU_AO_HOST_INTERRUPT_STATUS),
3766 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
3767 A6XX_GMU_AO_HOST_INTERRUPT_MASK),
Kyle Pieferb1027b02017-02-10 13:58:58 -08003768 ADRENO_REG_DEFINE(ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
3769 A6XX_GMU_GMU_PWR_COL_KEEPALIVE),
3770 ADRENO_REG_DEFINE(ADRENO_REG_GMU_AHB_FENCE_STATUS,
3771 A6XX_GMU_AHB_FENCE_STATUS),
3772 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_CTRL_STATUS,
3773 A6XX_GMU_HFI_CTRL_STATUS),
3774 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_VERSION_INFO,
3775 A6XX_GMU_HFI_VERSION_INFO),
3776 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HFI_SFR_ADDR,
3777 A6XX_GMU_HFI_SFR_ADDR),
3778 ADRENO_REG_DEFINE(ADRENO_REG_GMU_RPMH_POWER_STATE,
George Shenf2d4e052017-05-11 16:28:23 -07003779 A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE),
Kyle Pieferb1027b02017-02-10 13:58:58 -08003780 ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
3781 A6XX_GMU_GMU2HOST_INTR_CLR),
3782 ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
3783 A6XX_GMU_GMU2HOST_INTR_INFO),
Kyle Piefere7b06b42017-04-06 13:53:01 -07003784 ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
3785 A6XX_GMU_GMU2HOST_INTR_MASK),
Kyle Pieferb1027b02017-02-10 13:58:58 -08003786 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_SET,
3787 A6XX_GMU_HOST2GMU_INTR_SET),
3788 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
3789 A6XX_GMU_HOST2GMU_INTR_CLR),
3790 ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
3791 A6XX_GMU_HOST2GMU_INTR_RAW_INFO),
George Shen6927d8f2017-07-19 11:38:10 -07003792 ADRENO_REG_DEFINE(ADRENO_REG_GMU_NMI_CONTROL_STATUS,
3793 A6XX_GMU_NMI_CONTROL_STATUS),
3794 ADRENO_REG_DEFINE(ADRENO_REG_GMU_CM3_CFG,
3795 A6XX_GMU_CM3_CFG),
Deepak Kumar7c6f0082018-04-27 15:23:10 +05303796 ADRENO_REG_DEFINE(ADRENO_REG_GMU_RBBM_INT_UNMASKED_STATUS,
3797 A6XX_GMU_RBBM_INT_UNMASKED_STATUS),
Carter Cooper4a313ae2017-02-23 11:11:56 -07003798 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
3799 A6XX_RBBM_SECVID_TRUST_CNTL),
3800 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
3801 A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO),
3802 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
3803 A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI),
3804 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
3805 A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE),
3806 ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
3807 A6XX_RBBM_SECVID_TSB_CNTL),
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003808};
3809
3810static const struct adreno_reg_offsets a6xx_reg_offsets = {
3811 .offsets = a6xx_register_offsets,
3812 .offset_0 = ADRENO_REG_REGISTER_MAX,
3813};
3814
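/*
 * Add or update a perfcounter select register in the power-up register
 * list (adreno_dev->pwrup_reglist) that is shared with the CP ucode, so
 * the countable survives preemption and IFPC. The list is protected by
 * a simple two-party lock (flag_kmd/flag_ucode/turn) honored by both
 * the kernel and the GPU ucode.
 */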
Tarun Karra1382e512017-10-30 19:41:25 -07003815static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
3816 struct adreno_perfcount_register *reg, bool update_reg)
3817{
3818 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
3819 struct cpu_gpu_lock *lock = adreno_dev->pwrup_reglist.hostptr;
3820 struct reg_list_pair *reg_pair = (struct reg_list_pair *)(lock + 1);
3821 unsigned int i;
3822 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
3823 int ret = 0;
3824
3825 lock->flag_kmd = 1;
3826 /* Write flag_kmd before turn */
3827 wmb();
3828 lock->turn = 0;
3829 /* Write these fields before looping */
3830 mb();
3831
3832 /*
3833	 * Spin here while the GPU ucode holds the lock; lock->flag_ucode is
3834	 * set to 0 once the ucode releases it. Wait at least 1 second before
3835	 * giving up, which should be ample time for the GPU to release the lock.
3836 */
3837 while (lock->flag_ucode == 1 && lock->turn == 0) {
3838 cpu_relax();
3839 /* Get the latest updates from GPU */
3840 rmb();
3841 /*
3842	 * Make sure we wait at least 1 second for the lock;
3843	 * if we still do not have it after that, return an error.
3844 */
3845 if (time_after(jiffies, timeout) &&
3846 (lock->flag_ucode == 1 && lock->turn == 0)) {
3847 ret = -EBUSY;
3848 goto unlock;
3849 }
3850 }
3851
3852 /* Read flag_ucode and turn before list_length */
3853 rmb();
3854 /*
3855 * If the perfcounter select register is already present in reglist
3856 * update it, otherwise append the <select register, value> pair to
3857 * the end of the list.
3858 */
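	/* list_length counts dwords; each <select, value> pair takes two */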
3859 for (i = 0; i < lock->list_length >> 1; i++)
3860 if (reg_pair[i].offset == reg->select)
3861 break;
3862
3863 reg_pair[i].offset = reg->select;
3864 reg_pair[i].val = reg->countable;
3865 if (i == lock->list_length >> 1)
3866 lock->list_length += 2;
3867
3868 if (update_reg)
3869 kgsl_regwrite(device, reg->select, reg->countable);
3870
3871unlock:
3872	/* Make all of the above writes visible before releasing the lock */
3873 wmb();
3874 lock->flag_kmd = 0;
3875 return ret;
3876}
3877
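/* GPU specific function hooks and tables for the A6XX family */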
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003878struct adreno_gpudev adreno_a6xx_gpudev = {
3879 .reg_offsets = &a6xx_reg_offsets,
3880 .start = a6xx_start,
Shrenuj Bansal41665402016-12-16 15:25:54 -08003881 .snapshot = a6xx_snapshot,
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003882 .irq = &a6xx_irq,
Shrenuj Bansal41665402016-12-16 15:25:54 -08003883 .snapshot_data = &a6xx_snapshot_data,
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003884 .irq_trace = trace_kgsl_a5xx_irq_status,
3885 .num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
3886 .platform_setup = a6xx_platform_setup,
Shrenuj Bansal41665402016-12-16 15:25:54 -08003887 .init = a6xx_init,
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003888 .rb_start = a6xx_rb_start,
3889 .regulator_enable = a6xx_sptprac_enable,
3890 .regulator_disable = a6xx_sptprac_disable,
Lynus Vaz107d2892017-03-01 13:48:06 +05303891 .perfcounters = &a6xx_perfcounters,
Lynus Vaz856ca602017-05-24 16:56:36 +05303892 .enable_pwr_counters = a6xx_enable_pwr_counters,
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07003893 .count_throttles = a6xx_count_throttles,
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003894 .microcode_read = a6xx_microcode_read,
3895 .enable_64bit = a6xx_enable_64bit,
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06003896 .llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07003897 .llc_configure_gpuhtw_scid = a6xx_llc_configure_gpuhtw_scid,
Kyle Piefer11a48b62017-03-17 14:53:40 -07003898 .llc_enable_overrides = a6xx_llc_enable_overrides,
Kyle Pieferb1027b02017-02-10 13:58:58 -08003899 .oob_set = a6xx_oob_set,
3900 .oob_clear = a6xx_oob_clear,
Carter Cooperdf7ba702017-03-20 11:28:04 -06003901 .gpu_keepalive = a6xx_gpu_keepalive,
Kyle Pieferb1027b02017-02-10 13:58:58 -08003902 .rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
Oleg Perelet62d5cec2017-03-27 16:14:52 -07003903 .hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
Kyle Piefer4033f562017-08-16 10:00:48 -07003904 .wait_for_lowest_idle = a6xx_wait_for_lowest_idle,
Lynus Vaz1fde74d2017-03-20 18:02:47 +05303905 .wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
3906 .iommu_fault_block = a6xx_iommu_fault_block,
Shrenuj Bansald0fe7462017-05-08 16:11:19 -07003907 .reset = a6xx_reset,
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07003908 .soft_reset = a6xx_soft_reset,
Harshdeep Dhatt0cdc8992017-05-31 15:44:05 -06003909 .preemption_pre_ibsubmit = a6xx_preemption_pre_ibsubmit,
3910 .preemption_post_ibsubmit = a6xx_preemption_post_ibsubmit,
3911 .preemption_init = a6xx_preemption_init,
3912 .preemption_schedule = a6xx_preemption_schedule,
Harshdeep Dhattaae850c2017-08-21 17:19:26 -06003913 .set_marker = a6xx_set_marker,
Harshdeep Dhatt2e42f122017-05-31 17:27:19 -06003914 .preemption_context_init = a6xx_preemption_context_init,
3915 .preemption_context_destroy = a6xx_preemption_context_destroy,
Shrenuj Bansald197bf62017-04-07 11:00:09 -07003916 .gx_is_on = a6xx_gx_is_on,
3917 .sptprac_is_on = a6xx_sptprac_is_on,
Harshdeep Dhatt6ba7a942017-08-21 17:53:52 -06003918 .ccu_invalidate = a6xx_ccu_invalidate,
Tarun Karra1382e512017-10-30 19:41:25 -07003919 .perfcounter_update = a6xx_perfcounter_update,
Lokesh Batraa8300e02017-05-25 11:17:40 -07003920 .coresight = {&a6xx_coresight, &a6xx_coresight_cx},
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07003921};