/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#ifndef __ADRENO_H
#define __ADRENO_H

#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "adreno_drawctxt.h"
#include "adreno_ringbuffer.h"
#include "adreno_profile.h"
#include "adreno_dispatch.h"
#include "kgsl_iommu.h"
#include "adreno_perfcounter.h"
#include <linux/stat.h>
#include <linux/delay.h>
#include "kgsl_gmu.h"

#include "a4xx_reg.h"

#ifdef CONFIG_QCOM_OCMEM
#include <soc/qcom/ocmem.h>
#endif

#define DEVICE_3D_NAME "kgsl-3d"
#define DEVICE_3D0_NAME "kgsl-3d0"

/* ADRENO_DEVICE - Given a kgsl_device return the adreno device struct */
#define ADRENO_DEVICE(device) \
		container_of(device, struct adreno_device, dev)

/* KGSL_DEVICE - given an adreno_device, return the KGSL device struct */
#define KGSL_DEVICE(_dev) (&((_dev)->dev))

/* ADRENO_CONTEXT - Given a context return the adreno context struct */
#define ADRENO_CONTEXT(context) \
		container_of(context, struct adreno_context, base)

/* ADRENO_GPU_DEVICE - Given an adreno device return the GPU specific struct */
#define ADRENO_GPU_DEVICE(_a) ((_a)->gpucore->gpudev)

#define ADRENO_CHIPID_CORE(_id) (((_id) >> 24) & 0xFF)
#define ADRENO_CHIPID_MAJOR(_id) (((_id) >> 16) & 0xFF)
#define ADRENO_CHIPID_MINOR(_id) (((_id) >> 8) & 0xFF)
#define ADRENO_CHIPID_PATCH(_id) ((_id) & 0xFF)
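
/*
 * Illustrative example (not part of the driver): given a hypothetical
 * chipid of 0x05030002, the helpers above decompose it as core = 5,
 * major = 3, minor = 0 and patch = 2, which the target checks further
 * down in this file would treat as an A530 with patch level 2.
 */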

/* ADRENO_GPUREV - Return the GPU ID for the given adreno_device */
#define ADRENO_GPUREV(_a) ((_a)->gpucore->gpurev)

/*
 * ADRENO_FEATURE - return true if the specified feature is supported by the
 * GPU core
 */
#define ADRENO_FEATURE(_dev, _bit) \
	((_dev)->gpucore->features & (_bit))

/**
 * ADRENO_QUIRK - return true if the specified quirk is required by the GPU
 */
#define ADRENO_QUIRK(_dev, _bit) \
	((_dev)->quirks & (_bit))
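
/*
 * Typical usage (illustrative only): callers test a core capability or
 * workaround before taking the corresponding path, e.g.
 *
 *	if (ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
 *		...enable preemption...
 *	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_IOMMU_SYNC))
 *		...take the IOMMU sync path...
 *
 * The feature and quirk bits themselves are defined below.
 */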

/*
 * ADRENO_PREEMPT_STYLE - return preemption style
 */
#define ADRENO_PREEMPT_STYLE(flags) \
	((flags & KGSL_CONTEXT_PREEMPT_STYLE_MASK) >> \
		KGSL_CONTEXT_PREEMPT_STYLE_SHIFT)

/*
 * return the dispatcher drawqueue in which the given drawobj should
 * be submitted
 */
#define ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(c) \
	(&((ADRENO_CONTEXT(c->context))->rb->dispatch_q))

#define ADRENO_DRAWOBJ_RB(c) \
	((ADRENO_CONTEXT(c->context))->rb)

#define ADRENO_FW(a, f) (&(a->fw[f]))

/* Adreno core features */
/* The core uses OCMEM for GMEM/binning memory */
#define ADRENO_USES_OCMEM BIT(0)
/* The core supports an accelerated warm start */
#define ADRENO_WARM_START BIT(1)
/* The core supports the microcode bootstrap functionality */
#define ADRENO_USE_BOOTSTRAP BIT(2)
/* The core supports SP/TP hw controlled power collapse */
#define ADRENO_SPTP_PC BIT(3)
/* The core supports Peak Power Detection (PPD) */
#define ADRENO_PPD BIT(4)
/* The GPU supports content protection */
#define ADRENO_CONTENT_PROTECTION BIT(5)
/* The GPU supports preemption */
#define ADRENO_PREEMPTION BIT(6)
/* The core uses GPMU for power and limit management */
#define ADRENO_GPMU BIT(7)
/* The GPMU supports Limits Management */
#define ADRENO_LM BIT(8)
/* The core uses 64 bit GPU addresses */
#define ADRENO_64BIT BIT(9)
/* The GPU supports retention for cpz registers */
#define ADRENO_CPZ_RETENTION BIT(10)
/* The core has soft fault detection available */
#define ADRENO_SOFT_FAULT_DETECT BIT(11)
/* The GMU supports RPMh for power management */
#define ADRENO_RPMH BIT(12)
/* The GMU supports IFPC power management */
#define ADRENO_IFPC BIT(13)
/* The GMU supports HW based NAP */
#define ADRENO_HW_NAP BIT(14)
/* The GMU supports min voltage */
#define ADRENO_MIN_VOLT BIT(15)

/*
 * Adreno GPU quirks - control bits for various workarounds
 */

/* Set TWOPASSUSEWFI in PC_DBG_ECO_CNTL (5XX/6XX) */
#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
/* Lock/unlock mutex to sync with the IOMMU */
#define ADRENO_QUIRK_IOMMU_SYNC BIT(1)
/* Submit critical packets at GPU wake up */
#define ADRENO_QUIRK_CRITICAL_PACKETS BIT(2)
/* Mask out RB1-3 activity signals from HW hang detection logic */
#define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(3)
/* Disable RB sampler datapath clock gating optimization */
#define ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING BIT(4)
/* Disable local memory (LM) feature to avoid corner case error */
#define ADRENO_QUIRK_DISABLE_LMLOADKILL BIT(5)
/* Allow HFI to use registers to send message to GMU */
#define ADRENO_QUIRK_HFI_USE_REG BIT(6)

/* Flags to control command packet settings */
#define KGSL_CMD_FLAGS_NONE 0
#define KGSL_CMD_FLAGS_PMODE BIT(0)
#define KGSL_CMD_FLAGS_INTERNAL_ISSUE BIT(1)
#define KGSL_CMD_FLAGS_WFI BIT(2)
#define KGSL_CMD_FLAGS_PROFILE BIT(3)
#define KGSL_CMD_FLAGS_PWRON_FIXUP BIT(4)

/* Command identifiers */
#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF
#define KGSL_CMD_IDENTIFIER 0x2EEDFACE
#define KGSL_CMD_INTERNAL_IDENTIFIER 0x2EEDD00D
#define KGSL_START_OF_IB_IDENTIFIER 0x2EADEABE
#define KGSL_END_OF_IB_IDENTIFIER 0x2ABEDEAD
#define KGSL_START_OF_PROFILE_IDENTIFIER 0x2DEFADE1
#define KGSL_END_OF_PROFILE_IDENTIFIER 0x2DEFADE2
#define KGSL_PWRON_FIXUP_IDENTIFIER 0x2AFAFAFA

/* Number of times to try hard reset */
#define NUM_TIMES_RESET_RETRY 5

/* One cannot wait forever for the core to idle, so set an upper limit to the
 * amount of time to wait for the core to go idle
 */
#define ADRENO_IDLE_TIMEOUT (20 * 1000)

#define ADRENO_UCHE_GMEM_BASE 0x100000

#define ADRENO_FW_PFP 0
#define ADRENO_FW_SQE 0
#define ADRENO_FW_PM4 1

enum adreno_gpurev {
	ADRENO_REV_UNKNOWN = 0,
	ADRENO_REV_A304 = 304,
	ADRENO_REV_A305 = 305,
	ADRENO_REV_A305C = 306,
	ADRENO_REV_A306 = 307,
	ADRENO_REV_A306A = 308,
	ADRENO_REV_A310 = 310,
	ADRENO_REV_A320 = 320,
	ADRENO_REV_A330 = 330,
	ADRENO_REV_A305B = 335,
	ADRENO_REV_A405 = 405,
	ADRENO_REV_A418 = 418,
	ADRENO_REV_A420 = 420,
	ADRENO_REV_A430 = 430,
	ADRENO_REV_A505 = 505,
	ADRENO_REV_A506 = 506,
	ADRENO_REV_A508 = 508,
	ADRENO_REV_A510 = 510,
	ADRENO_REV_A512 = 512,
	ADRENO_REV_A530 = 530,
	ADRENO_REV_A540 = 540,
	ADRENO_REV_A615 = 615,
	ADRENO_REV_A630 = 630,
};

#define ADRENO_START_WARM 0
#define ADRENO_START_COLD 1

#define ADRENO_SOFT_FAULT BIT(0)
#define ADRENO_HARD_FAULT BIT(1)
#define ADRENO_TIMEOUT_FAULT BIT(2)
#define ADRENO_IOMMU_PAGE_FAULT BIT(3)
#define ADRENO_PREEMPT_FAULT BIT(4)
#define ADRENO_GMU_FAULT BIT(5)

#define ADRENO_SPTP_PC_CTRL 0
#define ADRENO_PPD_CTRL 1
#define ADRENO_LM_CTRL 2
#define ADRENO_HWCG_CTRL 3
#define ADRENO_THROTTLING_CTRL 4


/* number of throttle counters for DCVS adjustment */
#define ADRENO_GPMU_THROTTLE_COUNTERS 4
/* base for throttle counters */
#define ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG 43

struct adreno_gpudev;

/* Time to allow preemption to complete (in ms) */
#define ADRENO_PREEMPT_TIMEOUT 10000

#define ADRENO_INT_BIT(a, _bit) (((a)->gpucore->gpudev->int_bits) ? \
		(adreno_get_int(a, _bit) < 0 ? 0 : \
		BIT(adreno_get_int(a, _bit))) : 0)

/**
 * enum adreno_preempt_states
 * ADRENO_PREEMPT_NONE: No preemption is scheduled
 * ADRENO_PREEMPT_START: The S/W has started the preemption process
 * ADRENO_PREEMPT_TRIGGERED: A preempt has been triggered in the HW
 * ADRENO_PREEMPT_FAULTED: The preempt timer has fired
 * ADRENO_PREEMPT_PENDING: The H/W has signaled preemption complete
 * ADRENO_PREEMPT_COMPLETE: Preemption could not be finished in the IRQ handler,
 * worker has been scheduled
 */
enum adreno_preempt_states {
	ADRENO_PREEMPT_NONE = 0,
	ADRENO_PREEMPT_START,
	ADRENO_PREEMPT_TRIGGERED,
	ADRENO_PREEMPT_FAULTED,
	ADRENO_PREEMPT_PENDING,
	ADRENO_PREEMPT_COMPLETE,
};

/**
 * struct adreno_preemption
 * @state: The current state of preemption
 * @counters: Memory descriptor for the memory where the GPU writes the
 * preemption counters on switch
 * @timer: A timer to make sure preemption doesn't stall
 * @work: A work struct for the preemption worker (for 5XX)
 * @token_submit: Indicates if a preempt token has been submitted in
 * current ringbuffer (for 4XX)
 */
struct adreno_preemption {
	atomic_t state;
	struct kgsl_memdesc counters;
	struct timer_list timer;
	struct work_struct work;
	bool token_submit;
};


struct adreno_busy_data {
	unsigned int gpu_busy;
	unsigned int vbif_ram_cycles;
	unsigned int vbif_starved_ram;
	unsigned int throttle_cycles[ADRENO_GPMU_THROTTLE_COUNTERS];
};

/**
 * struct adreno_firmware - Struct holding fw details
 * @fwvirt: Buffer which holds the ucode
 * @size: Size of ucode buffer
 * @version: Version of ucode
 * @memdesc: Memory descriptor which holds ucode buffer info
 */
struct adreno_firmware {
	unsigned int *fwvirt;
	size_t size;
	unsigned int version;
	struct kgsl_memdesc memdesc;
};

/**
 * struct adreno_gpu_core - A specific GPU core definition
 * @gpurev: Unique GPU revision identifier
 * @core: Match for the core version of the GPU
 * @major: Match for the major version of the GPU
 * @minor: Match for the minor version of the GPU
 * @patchid: Match for the patch revision of the GPU
 * @features: Common adreno features supported by this core
 * @pm4fw_name: Filename for the PM4 firmware
 * @pfpfw_name: Filename for the PFP firmware
 * @sqefw_name: Filename for the SQE firmware
 * @zap_name: Filename for the Zap Shader ucode
 * @gpudev: Pointer to the GPU family specific functions for this core
 * @gmem_size: Amount of binning memory (GMEM/OCMEM) to reserve for the core
 * @pm4_jt_idx: Index of the jump table in the PM4 microcode
 * @pm4_jt_addr: Address offset to load the jump table for the PM4 microcode
 * @pfp_jt_idx: Index of the jump table in the PFP microcode
 * @pfp_jt_addr: Address offset to load the jump table for the PFP microcode
 * @pm4_bstrp_size: Size of the bootstrap loader for PM4 microcode
 * @pfp_bstrp_size: Size of the bootstrap loader for PFP microcode
 * @pfp_bstrp_ver: Version of the PFP microcode that supports bootstrapping
 * @shader_offset: Offset of shader from gpu reg base
 * @shader_size: Shader size
 * @num_protected_regs: number of protected registers
 * @gpmufw_name: Filename for the GPMU firmware
 * @gpmu_major: Match for the GPMU & firmware, major revision
 * @gpmu_minor: Match for the GPMU & firmware, minor revision
 * @gpmu_features: Supported features for any given GPMU version
 * @busy_mask: mask to check if GPU is busy in RBBM_STATUS
 * @lm_major: Limits Management register sequence, major revision
 * @lm_minor: LM register sequence, minor revision
 * @regfw_name: Filename for the register sequence firmware
 * @gpmu_tsens: ID for the temperature sensor used by the GPMU
 * @max_power: Max possible power draw of a core, units elephant tail hairs
 */
struct adreno_gpu_core {
	enum adreno_gpurev gpurev;
	unsigned int core, major, minor, patchid;
	unsigned long features;
	const char *pm4fw_name;
	const char *pfpfw_name;
	const char *sqefw_name;
	const char *zap_name;
	struct adreno_gpudev *gpudev;
	size_t gmem_size;
	unsigned int pm4_jt_idx;
	unsigned int pm4_jt_addr;
	unsigned int pfp_jt_idx;
	unsigned int pfp_jt_addr;
	unsigned int pm4_bstrp_size;
	unsigned int pfp_bstrp_size;
	unsigned int pfp_bstrp_ver;
	unsigned long shader_offset;
	unsigned int shader_size;
	unsigned int num_protected_regs;
	const char *gpmufw_name;
	unsigned int gpmu_major;
	unsigned int gpmu_minor;
	unsigned int gpmu_features;
	unsigned int busy_mask;
	unsigned int lm_major, lm_minor;
	const char *regfw_name;
	unsigned int gpmu_tsens;
	unsigned int max_power;
};

/**
 * struct adreno_device - The mothership structure for all adreno related info
 * @dev: Reference to struct kgsl_device
 * @priv: Holds the private flags specific to the adreno_device
 * @chipid: Chip ID specific to the GPU
 * @gmem_base: Base physical address of GMEM
 * @gmem_size: GMEM size
 * @gpucore: Pointer to the adreno_gpu_core structure
 * @pfp_fw: Buffer which holds the pfp ucode
 * @pfp_fw_size: Size of pfp ucode buffer
 * @pfp_fw_version: Version of pfp ucode
 * @pfp: Memory descriptor which holds pfp ucode buffer info
 * @pm4_fw: Buffer which holds the pm4 ucode
 * @pm4_fw_size: Size of pm4 ucode buffer
 * @pm4_fw_version: Version of pm4 ucode
 * @pm4: Memory descriptor which holds pm4 ucode buffer info
 * @gpmu_cmds_size: Length of gpmu cmd stream
 * @gpmu_cmds: gpmu cmd stream
 * @ringbuffers: Array of pointers to adreno_ringbuffers
 * @num_ringbuffers: Number of ringbuffers for the GPU
 * @cur_rb: Pointer to the current ringbuffer
 * @next_rb: Ringbuffer we are switching to during preemption
 * @prev_rb: Ringbuffer we are switching from during preemption
 * @fast_hang_detect: Software fault detection availability
 * @ft_policy: Defines the fault tolerance policy
 * @long_ib_detect: Long IB detection availability
 * @ft_pf_policy: Defines the fault policy for page faults
 * @ocmem_hdl: Handle to the ocmem allocated buffer
 * @profile: Container for adreno profiler information
 * @dispatcher: Container for adreno GPU dispatcher
 * @pwron_fixup: Command buffer to run a post-power collapse shader workaround
 * @pwron_fixup_dwords: Number of dwords in the command buffer
 * @input_work: Work struct for turning on the GPU after a touch event
 * @busy_data: Struct holding GPU VBIF busy stats
 * @ram_cycles_lo: Number of DDR clock cycles for the monitor session
 * @perfctr_pwr_lo: Number of cycles VBIF is stalled by DDR
 * @halt: Atomic variable to check whether the GPU is currently halted
 * @pending_irq_refcnt: Atomic variable to keep track of running IRQ handlers
 * @ctx_d_debugfs: Context debugfs node
 * @pwrctrl_flag: Flag to hold adreno specific power attributes
 * @profile_buffer: Memdesc holding the drawobj profiling buffer
 * @profile_index: Index to store the start/stop ticks in the profiling
 * buffer
 * @sp_local_gpuaddr: Base GPU virtual address for SP local memory
 * @sp_pvt_gpuaddr: Base GPU virtual address for SP private memory
 * @lm_fw: The LM firmware handle
 * @lm_sequence: Pointer to the start of the register write sequence for LM
 * @lm_size: The dword size of the LM sequence
 * @lm_limit: limiting value for LM
 * @lm_threshold_count: register value for counter for lm threshold breakin
 * @lm_threshold_cross: number of current peaks exceeding threshold
 * @speed_bin: Indicate which power level set to use
 * @csdev: Pointer to a coresight device (if applicable)
 * @gpmu_throttle_counters: counters for number of throttled clocks
 * @irq_storm_work: Worker to handle possible interrupt storms
 * @active_list: List to track active contexts
 * @active_list_lock: Lock to protect active_list
 * @gpu_llc_slice: GPU system cache slice descriptor
 * @gpu_llc_slice_enable: To enable the GPU system cache slice or not
 * @gpuhtw_llc_slice: GPU pagetables system cache slice descriptor
 * @gpuhtw_llc_slice_enable: To enable the GPUHTW system cache slice or not
 * @zap_loaded: Used to track if zap was successfully loaded or not
 */
struct adreno_device {
	struct kgsl_device dev;    /* Must be first field in this struct */
	unsigned long priv;
	unsigned int chipid;
	unsigned long gmem_base;
	unsigned long gmem_size;
	const struct adreno_gpu_core *gpucore;
	struct adreno_firmware fw[2];
	size_t gpmu_cmds_size;
	unsigned int *gpmu_cmds;
	struct adreno_ringbuffer ringbuffers[KGSL_PRIORITY_MAX_RB_LEVELS];
	int num_ringbuffers;
	struct adreno_ringbuffer *cur_rb;
	struct adreno_ringbuffer *next_rb;
	struct adreno_ringbuffer *prev_rb;
	unsigned int fast_hang_detect;
	unsigned long ft_policy;
	unsigned int long_ib_detect;
	unsigned long ft_pf_policy;
	struct ocmem_buf *ocmem_hdl;
	struct adreno_profile profile;
	struct adreno_dispatcher dispatcher;
	struct kgsl_memdesc pwron_fixup;
	unsigned int pwron_fixup_dwords;
	struct work_struct input_work;
	struct adreno_busy_data busy_data;
	unsigned int ram_cycles_lo;
	unsigned int starved_ram_lo;
	unsigned int perfctr_pwr_lo;
	atomic_t halt;
	atomic_t pending_irq_refcnt;
	struct dentry *ctx_d_debugfs;
	unsigned long pwrctrl_flag;

	struct kgsl_memdesc profile_buffer;
	unsigned int profile_index;
	uint64_t sp_local_gpuaddr;
	uint64_t sp_pvt_gpuaddr;
	const struct firmware *lm_fw;
	uint32_t *lm_sequence;
	uint32_t lm_size;
	struct adreno_preemption preempt;
	struct work_struct gpmu_work;
	uint32_t lm_leakage;
	uint32_t lm_limit;
	uint32_t lm_threshold_count;
	uint32_t lm_threshold_cross;

	unsigned int speed_bin;
	unsigned int quirks;

	struct coresight_device *csdev;
	uint32_t gpmu_throttle_counters[ADRENO_GPMU_THROTTLE_COUNTERS];
	struct work_struct irq_storm_work;

	struct list_head active_list;
	spinlock_t active_list_lock;

	void *gpu_llc_slice;
	bool gpu_llc_slice_enable;
	void *gpuhtw_llc_slice;
	bool gpuhtw_llc_slice_enable;
	unsigned int zap_loaded;
};

/**
 * enum adreno_device_flags - Private flags for the adreno_device
 * @ADRENO_DEVICE_PWRON - Set during init after a power collapse
 * @ADRENO_DEVICE_PWRON_FIXUP - Set if the target requires the shader fixup
 * after power collapse
 * @ADRENO_DEVICE_CORESIGHT - Set if the coresight (trace bus) registers should
 * be restored after power collapse
 * @ADRENO_DEVICE_HANG_INTR - Set if the hang interrupt should be enabled for
 * this target
 * @ADRENO_DEVICE_STARTED - Set if the device start sequence is in progress
 * @ADRENO_DEVICE_FAULT - Set if the device is currently in fault (and shouldn't
 * send any more commands to the ringbuffer)
 * @ADRENO_DEVICE_DRAWOBJ_PROFILE - Set if the device supports drawobj
 * profiling via the ALWAYSON counter
 * @ADRENO_DEVICE_PREEMPTION - Turn on/off preemption
 * @ADRENO_DEVICE_SOFT_FAULT_DETECT - Set if soft fault detect is enabled
 * @ADRENO_DEVICE_GPMU_INITIALIZED - Set if GPMU firmware initialization succeeded
 * @ADRENO_DEVICE_ISDB_ENABLED - Set if the Integrated Shader DeBugger is
 * attached and enabled
 * @ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED - Set if a CACHE_FLUSH_TS irq storm
 * is in progress
 * @ADRENO_DEVICE_HARD_RESET - Set if soft reset fails and hard reset is needed
 */
enum adreno_device_flags {
	ADRENO_DEVICE_PWRON = 0,
	ADRENO_DEVICE_PWRON_FIXUP = 1,
	ADRENO_DEVICE_INITIALIZED = 2,
	ADRENO_DEVICE_CORESIGHT = 3,
	ADRENO_DEVICE_HANG_INTR = 4,
	ADRENO_DEVICE_STARTED = 5,
	ADRENO_DEVICE_FAULT = 6,
	ADRENO_DEVICE_DRAWOBJ_PROFILE = 7,
	ADRENO_DEVICE_GPU_REGULATOR_ENABLED = 8,
	ADRENO_DEVICE_PREEMPTION = 9,
	ADRENO_DEVICE_SOFT_FAULT_DETECT = 10,
	ADRENO_DEVICE_GPMU_INITIALIZED = 11,
	ADRENO_DEVICE_ISDB_ENABLED = 12,
	ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED = 13,
	ADRENO_DEVICE_HARD_RESET = 14,
};

/**
 * struct adreno_drawobj_profile_entry - a single drawobj entry in the
 * kernel profiling buffer
 * @started: Number of GPU ticks at start of the drawobj
 * @retired: Number of GPU ticks at the end of the drawobj
 */
struct adreno_drawobj_profile_entry {
	uint64_t started;
	uint64_t retired;
};

#define ADRENO_DRAWOBJ_PROFILE_COUNT \
	(PAGE_SIZE / sizeof(struct adreno_drawobj_profile_entry))

#define ADRENO_DRAWOBJ_PROFILE_OFFSET(_index, _member) \
	 ((_index) * sizeof(struct adreno_drawobj_profile_entry) \
	  + offsetof(struct adreno_drawobj_profile_entry, _member))
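
/*
 * Illustrative sketch (assumes the hostptr mapping provided by struct
 * kgsl_memdesc): reading the start ticks of entry _index from the profile
 * buffer would look roughly like
 *
 *	void *base = adreno_dev->profile_buffer.hostptr;
 *	uint64_t started = *(uint64_t *)((char *)base +
 *		ADRENO_DRAWOBJ_PROFILE_OFFSET(_index, started));
 *
 * with _index kept below ADRENO_DRAWOBJ_PROFILE_COUNT so the access stays
 * within the single profiling page.
 */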


/**
 * adreno_regs: List of registers that are used in kgsl driver for all
 * 3D devices. Each device type has different offset value for the same
 * register, so an array of register offsets are declared for every device
 * and are indexed by the enumeration values defined in this enum
 */
enum adreno_regs {
	ADRENO_REG_CP_ME_RAM_WADDR,
	ADRENO_REG_CP_ME_RAM_DATA,
	ADRENO_REG_CP_PFP_UCODE_DATA,
	ADRENO_REG_CP_PFP_UCODE_ADDR,
	ADRENO_REG_CP_WFI_PEND_CTR,
	ADRENO_REG_CP_RB_BASE,
	ADRENO_REG_CP_RB_BASE_HI,
	ADRENO_REG_CP_RB_RPTR_ADDR_LO,
	ADRENO_REG_CP_RB_RPTR_ADDR_HI,
	ADRENO_REG_CP_RB_RPTR,
	ADRENO_REG_CP_RB_WPTR,
	ADRENO_REG_CP_CNTL,
	ADRENO_REG_CP_ME_CNTL,
	ADRENO_REG_CP_RB_CNTL,
	ADRENO_REG_CP_IB1_BASE,
	ADRENO_REG_CP_IB1_BASE_HI,
	ADRENO_REG_CP_IB1_BUFSZ,
	ADRENO_REG_CP_IB2_BASE,
	ADRENO_REG_CP_IB2_BASE_HI,
	ADRENO_REG_CP_IB2_BUFSZ,
	ADRENO_REG_CP_TIMESTAMP,
	ADRENO_REG_CP_SCRATCH_REG6,
	ADRENO_REG_CP_SCRATCH_REG7,
	ADRENO_REG_CP_ME_RAM_RADDR,
	ADRENO_REG_CP_ROQ_ADDR,
	ADRENO_REG_CP_ROQ_DATA,
	ADRENO_REG_CP_MERCIU_ADDR,
	ADRENO_REG_CP_MERCIU_DATA,
	ADRENO_REG_CP_MERCIU_DATA2,
	ADRENO_REG_CP_MEQ_ADDR,
	ADRENO_REG_CP_MEQ_DATA,
	ADRENO_REG_CP_HW_FAULT,
	ADRENO_REG_CP_PROTECT_STATUS,
	ADRENO_REG_CP_PREEMPT,
	ADRENO_REG_CP_PREEMPT_DEBUG,
	ADRENO_REG_CP_PREEMPT_DISABLE,
	ADRENO_REG_CP_PROTECT_REG_0,
	ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
	ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
	ADRENO_REG_RBBM_STATUS,
	ADRENO_REG_RBBM_STATUS3,
	ADRENO_REG_RBBM_PERFCTR_CTL,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD3,
	ADRENO_REG_RBBM_PERFCTR_PWR_1_LO,
	ADRENO_REG_RBBM_INT_0_MASK,
	ADRENO_REG_RBBM_INT_0_STATUS,
	ADRENO_REG_RBBM_PM_OVERRIDE2,
	ADRENO_REG_RBBM_INT_CLEAR_CMD,
	ADRENO_REG_RBBM_SW_RESET_CMD,
	ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD,
	ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
	ADRENO_REG_RBBM_CLOCK_CTL,
	ADRENO_REG_VPC_DEBUG_RAM_SEL,
	ADRENO_REG_VPC_DEBUG_RAM_READ,
	ADRENO_REG_PA_SC_AA_CONFIG,
	ADRENO_REG_SQ_GPR_MANAGEMENT,
	ADRENO_REG_SQ_INST_STORE_MANAGEMENT,
	ADRENO_REG_TP0_CHICKEN,
	ADRENO_REG_RBBM_RBBM_CTL,
	ADRENO_REG_UCHE_INVALIDATE0,
	ADRENO_REG_UCHE_INVALIDATE1,
	ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
	ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
	ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
	ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
	ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
	ADRENO_REG_RBBM_SECVID_TRUST_CONFIG,
	ADRENO_REG_RBBM_SECVID_TSB_CONTROL,
	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
	ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
	ADRENO_REG_VBIF_XIN_HALT_CTRL0,
	ADRENO_REG_VBIF_XIN_HALT_CTRL1,
	ADRENO_REG_VBIF_VERSION,
	ADRENO_REG_GMU_AO_INTERRUPT_EN,
	ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
	ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
	ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
	ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
	ADRENO_REG_GMU_AHB_FENCE_STATUS,
	ADRENO_REG_GMU_RPMH_POWER_STATE,
	ADRENO_REG_GMU_HFI_CTRL_STATUS,
	ADRENO_REG_GMU_HFI_VERSION_INFO,
	ADRENO_REG_GMU_HFI_SFR_ADDR,
	ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
	ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
	ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
	ADRENO_REG_GMU_HOST2GMU_INTR_SET,
	ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
	ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
	ADRENO_REG_REGISTER_MAX,
};

enum adreno_int_bits {
	ADRENO_INT_RBBM_AHB_ERROR,
	ADRENO_INT_BITS_MAX,
};

/**
 * adreno_reg_offsets: Holds array of register offsets
 * @offsets: Offset array of size defined by enum adreno_regs
 * @offset_0: This is the index of the register in offset array whose value
 * is 0. 0 is a valid register offset and during initialization of the
 * offset array we need to know if an offset value is correctly defined to 0
 */
struct adreno_reg_offsets {
	unsigned int *const offsets;
	enum adreno_regs offset_0;
};

#define ADRENO_REG_UNUSED 0xFFFFFFFF
#define ADRENO_REG_SKIP 0xFFFFFFFE
#define ADRENO_REG_DEFINE(_offset, _reg) [_offset] = _reg
#define ADRENO_INT_DEFINE(_offset, _val) ADRENO_REG_DEFINE(_offset, _val)

/*
 * struct adreno_vbif_data - Describes vbif register value pair
 * @reg: Offset to vbif register
 * @val: The value that should be programmed in the register at reg
 */
struct adreno_vbif_data {
	unsigned int reg;
	unsigned int val;
};

/*
 * struct adreno_vbif_platform - Holds an array of vbif reg value pairs
 * for a particular core
 * @devfunc: Pointer to platform/core identification function
 * @vbif: Array of reg value pairs for vbif registers
 */
struct adreno_vbif_platform {
	int (*devfunc)(struct adreno_device *);
	const struct adreno_vbif_data *vbif;
};

/*
 * struct adreno_vbif_snapshot_registers - Holds an array of vbif registers
 * listed for snapshot dump for a particular core
 * @version: vbif version
 * @mask: vbif revision mask
 * @registers: vbif registers listed for snapshot dump
 * @count: count of vbif registers listed for snapshot
 */
struct adreno_vbif_snapshot_registers {
	const unsigned int version;
	const unsigned int mask;
	const unsigned int *registers;
	const int count;
};

/**
 * struct adreno_coresight_register - Definition for a coresight (tracebus)
 * debug register
 * @offset: Offset of the debug register in the KGSL mmio region
 * @initial: Default value to write when coresight is enabled
 * @value: Current shadow value of the register (to be reprogrammed after power
 * collapse)
 */
struct adreno_coresight_register {
	unsigned int offset;
	unsigned int initial;
	unsigned int value;
};

struct adreno_coresight_attr {
	struct device_attribute attr;
	struct adreno_coresight_register *reg;
};

ssize_t adreno_coresight_show_register(struct device *device,
		struct device_attribute *attr, char *buf);

ssize_t adreno_coresight_store_register(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size);

#define ADRENO_CORESIGHT_ATTR(_attrname, _reg) \
	struct adreno_coresight_attr coresight_attr_##_attrname = { \
		__ATTR(_attrname, 0644, \
		adreno_coresight_show_register, \
		adreno_coresight_store_register), \
		(_reg), }

/**
 * struct adreno_coresight - GPU specific coresight definition
 * @registers - Array of GPU specific registers to configure trace bus output
 * @count - Number of registers in the array
 * @groups - Pointer to an attribute list of control files
 * @atid - The unique ATID value of the coresight device
 */
struct adreno_coresight {
	struct adreno_coresight_register *registers;
	unsigned int count;
	const struct attribute_group **groups;
	unsigned int atid;
};


struct adreno_irq_funcs {
	void (*func)(struct adreno_device *, int);
};
#define ADRENO_IRQ_CALLBACK(_c) { .func = _c }

struct adreno_irq {
	unsigned int mask;
	struct adreno_irq_funcs *funcs;
};

/*
 * struct adreno_debugbus_block - Holds info about debug buses of a chip
 * @block_id: Bus identifier
 * @dwords: Number of dwords of data that this block holds
 */
struct adreno_debugbus_block {
	unsigned int block_id;
	unsigned int dwords;
};

/*
 * struct adreno_snapshot_sizes - Structure holding the size of
 * different sections dumped during device snapshot
 * @cp_pfp: CP PFP data section size
 * @cp_me: CP ME data section size
 * @vpc_mem: VPC memory section size
 * @cp_meq: CP MEQ size
 * @shader_mem: Size of shader memory of 1 shader section
 * @cp_merciu: CP MERCIU size
 * @roq: ROQ size
 */
struct adreno_snapshot_sizes {
	int cp_pfp;
	int cp_me;
	int vpc_mem;
	int cp_meq;
	int shader_mem;
	int cp_merciu;
	int roq;
};

/*
 * struct adreno_snapshot_data - Holds data used in snapshot
 * @sect_sizes: Has sections sizes
 */
struct adreno_snapshot_data {
	struct adreno_snapshot_sizes *sect_sizes;
};

struct adreno_gpudev {
	/*
	 * These registers are in a different location on different devices,
	 * so define them in the structure and use them as variables.
	 */
	const struct adreno_reg_offsets *reg_offsets;
	unsigned int *const int_bits;
	const struct adreno_ft_perf_counters *ft_perf_counters;
	unsigned int ft_perf_counters_count;

	struct adreno_perfcounters *perfcounters;
	const struct adreno_invalid_countables *invalid_countables;
	struct adreno_snapshot_data *snapshot_data;

	struct adreno_coresight *coresight;

	struct adreno_irq *irq;
	int num_prio_levels;
	unsigned int vbif_xin_halt_ctrl0_mask;
	/* GPU specific function hooks */
	void (*irq_trace)(struct adreno_device *, unsigned int status);
	void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *);
	void (*platform_setup)(struct adreno_device *);
	void (*init)(struct adreno_device *);
	void (*remove)(struct adreno_device *);
	int (*rb_start)(struct adreno_device *, unsigned int start_type);
	int (*microcode_read)(struct adreno_device *);
	void (*perfcounter_init)(struct adreno_device *);
	void (*perfcounter_close)(struct adreno_device *);
	void (*start)(struct adreno_device *);
	bool (*is_sptp_idle)(struct adreno_device *);
	int (*regulator_enable)(struct adreno_device *);
	void (*regulator_disable)(struct adreno_device *);
	void (*pwrlevel_change_settings)(struct adreno_device *,
		unsigned int prelevel, unsigned int postlevel,
		bool post);
	uint64_t (*read_throttling_counters)(struct adreno_device *);
	void (*count_throttles)(struct adreno_device *, uint64_t adj);
	int (*enable_pwr_counters)(struct adreno_device *,
		unsigned int counter);
	unsigned int (*preemption_pre_ibsubmit)(
		struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb,
		unsigned int *cmds,
		struct kgsl_context *context);
	int (*preemption_yield_enable)(unsigned int *);
	unsigned int (*preemption_post_ibsubmit)(
		struct adreno_device *adreno_dev,
		unsigned int *cmds);
	int (*preemption_init)(struct adreno_device *);
	void (*preemption_schedule)(struct adreno_device *);
	void (*enable_64bit)(struct adreno_device *);
	void (*clk_set_options)(struct adreno_device *,
		const char *, struct clk *, bool on);
	void (*llc_configure_gpu_scid)(struct adreno_device *adreno_dev);
	void (*llc_configure_gpuhtw_scid)(struct adreno_device *adreno_dev);
	void (*llc_enable_overrides)(struct adreno_device *adreno_dev);
	void (*pre_reset)(struct adreno_device *);
	int (*oob_set)(struct adreno_device *adreno_dev, unsigned int set_mask,
		unsigned int check_mask,
		unsigned int clear_mask);
	void (*oob_clear)(struct adreno_device *adreno_dev,
		unsigned int clear_mask);
	void (*gpu_keepalive)(struct adreno_device *adreno_dev,
		bool state);
	int (*rpmh_gpu_pwrctrl)(struct adreno_device *, unsigned int ops,
		unsigned int arg1, unsigned int arg2);
	bool (*hw_isidle)(struct adreno_device *);
	int (*wait_for_gmu_idle)(struct adreno_device *);
	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
		unsigned int fsynr1);
	int (*reset)(struct kgsl_device *, int fault);
	int (*soft_reset)(struct adreno_device *);
};

/**
 * enum kgsl_ft_policy_bits - KGSL fault tolerance policy bits
 * @KGSL_FT_OFF: Disable fault detection (not used)
 * @KGSL_FT_REPLAY: Replay the faulting command
 * @KGSL_FT_SKIPIB: Skip the faulting indirect buffer
 * @KGSL_FT_SKIPFRAME: Skip the frame containing the faulting IB
 * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command obj
 * @KGSL_FT_TEMP_DISABLE: Disables FT for all commands
 * @KGSL_FT_THROTTLE: Disable the context if it faults too often
 * @KGSL_FT_SKIPCMD: Skip the command containing the faulting IB
 */
enum kgsl_ft_policy_bits {
	KGSL_FT_OFF = 0,
	KGSL_FT_REPLAY = 1,
	KGSL_FT_SKIPIB = 2,
	KGSL_FT_SKIPFRAME = 3,
	KGSL_FT_DISABLE = 4,
	KGSL_FT_TEMP_DISABLE = 5,
	KGSL_FT_THROTTLE = 6,
	KGSL_FT_SKIPCMD = 7,
	/* KGSL_FT_MAX_BITS is used to calculate the mask */
	KGSL_FT_MAX_BITS,
	/* Internal bits - set during GFT */
	/* Skip the PM dump on replayed command obj's */
	KGSL_FT_SKIP_PMDUMP = 31,
};

#define KGSL_FT_POLICY_MASK GENMASK(KGSL_FT_MAX_BITS - 1, 0)

#define KGSL_FT_DEFAULT_POLICY \
	(BIT(KGSL_FT_REPLAY) | \
	 BIT(KGSL_FT_SKIPCMD) | \
	 BIT(KGSL_FT_THROTTLE))

#define ADRENO_FT_TYPES \
	{ BIT(KGSL_FT_OFF), "off" }, \
	{ BIT(KGSL_FT_REPLAY), "replay" }, \
	{ BIT(KGSL_FT_SKIPIB), "skipib" }, \
	{ BIT(KGSL_FT_SKIPFRAME), "skipframe" }, \
	{ BIT(KGSL_FT_DISABLE), "disable" }, \
	{ BIT(KGSL_FT_TEMP_DISABLE), "temp" }, \
	{ BIT(KGSL_FT_THROTTLE), "throttle"}, \
	{ BIT(KGSL_FT_SKIPCMD), "skipcmd" }

/**
 * enum kgsl_ft_pagefault_policy_bits - KGSL pagefault policy bits
 * @KGSL_FT_PAGEFAULT_INT_ENABLE: No longer used, but retained for compatibility
 * @KGSL_FT_PAGEFAULT_GPUHALT_ENABLE: enable GPU halt on pagefaults
 * @KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE: log one pagefault per page
 * @KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT: log one pagefault per interrupt
 */
enum {
	KGSL_FT_PAGEFAULT_INT_ENABLE = 0,
	KGSL_FT_PAGEFAULT_GPUHALT_ENABLE = 1,
	KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE = 2,
	KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT = 3,
	/* KGSL_FT_PAGEFAULT_MAX_BITS is used to calculate the mask */
	KGSL_FT_PAGEFAULT_MAX_BITS,
};

#define KGSL_FT_PAGEFAULT_MASK GENMASK(KGSL_FT_PAGEFAULT_MAX_BITS - 1, 0)

#define KGSL_FT_PAGEFAULT_DEFAULT_POLICY 0

#define FOR_EACH_RINGBUFFER(_dev, _rb, _i)			\
	for ((_i) = 0, (_rb) = &((_dev)->ringbuffers[0]);	\
		(_i) < (_dev)->num_ringbuffers;			\
		(_i)++, (_rb)++)

struct adreno_ft_perf_counters {
	unsigned int counter;
	unsigned int countable;
};

extern unsigned int *adreno_ft_regs;
extern unsigned int adreno_ft_regs_num;
extern unsigned int *adreno_ft_regs_val;

extern struct adreno_gpudev adreno_a3xx_gpudev;
extern struct adreno_gpudev adreno_a4xx_gpudev;
extern struct adreno_gpudev adreno_a5xx_gpudev;
extern struct adreno_gpudev adreno_a6xx_gpudev;

extern int adreno_wake_nice;
extern unsigned int adreno_wake_timeout;

int adreno_start(struct kgsl_device *device, int priority);
int adreno_soft_reset(struct kgsl_device *device);
long adreno_ioctl(struct kgsl_device_private *dev_priv,
		unsigned int cmd, unsigned long arg);

long adreno_ioctl_helper(struct kgsl_device_private *dev_priv,
		unsigned int cmd, unsigned long arg,
		const struct kgsl_ioctl *cmds, int len);

int a5xx_critical_packet_submit(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb);
int adreno_set_unsecured_mode(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb);
void adreno_spin_idle_debug(struct adreno_device *adreno_dev, const char *str);
int adreno_spin_idle(struct adreno_device *device, unsigned int timeout);
int adreno_idle(struct kgsl_device *device);
bool adreno_isidle(struct kgsl_device *device);

int adreno_set_constraint(struct kgsl_device *device,
		struct kgsl_context *context,
		struct kgsl_device_constraint *constraint);

void adreno_shadermem_regread(struct kgsl_device *device,
		unsigned int offsetwords,
		unsigned int *value);

void adreno_snapshot(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot,
		struct kgsl_context *context);

int adreno_reset(struct kgsl_device *device, int fault);

void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
		struct adreno_context *drawctxt,
		struct kgsl_drawobj *drawobj);

int adreno_coresight_init(struct adreno_device *adreno_dev);

void adreno_coresight_start(struct adreno_device *adreno_dev);
void adreno_coresight_stop(struct adreno_device *adreno_dev);

void adreno_coresight_remove(struct adreno_device *adreno_dev);

bool adreno_hw_isidle(struct adreno_device *adreno_dev);

void adreno_fault_detect_start(struct adreno_device *adreno_dev);
void adreno_fault_detect_stop(struct adreno_device *adreno_dev);

void adreno_hang_int_callback(struct adreno_device *adreno_dev, int bit);
void adreno_cp_callback(struct adreno_device *adreno_dev, int bit);

int adreno_sysfs_init(struct adreno_device *adreno_dev);
void adreno_sysfs_close(struct adreno_device *adreno_dev);

void adreno_irqctrl(struct adreno_device *adreno_dev, int state);

long adreno_ioctl_perfcounter_get(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data);

long adreno_ioctl_perfcounter_put(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data);

int adreno_efuse_map(struct adreno_device *adreno_dev);
int adreno_efuse_read_u32(struct adreno_device *adreno_dev, unsigned int offset,
		unsigned int *val);
void adreno_efuse_unmap(struct adreno_device *adreno_dev);

#define ADRENO_TARGET(_name, _id) \
static inline int adreno_is_##_name(struct adreno_device *adreno_dev) \
{ \
	return (ADRENO_GPUREV(adreno_dev) == (_id)); \
}
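
/*
 * For example, ADRENO_TARGET(a530, ADRENO_REV_A530) below expands to a
 * static inline adreno_is_a530() that simply compares ADRENO_GPUREV()
 * against ADRENO_REV_A530.
 */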

static inline int adreno_is_a3xx(struct adreno_device *adreno_dev)
{
	return ((ADRENO_GPUREV(adreno_dev) >= 300) &&
		(ADRENO_GPUREV(adreno_dev) < 400));
}

ADRENO_TARGET(a304, ADRENO_REV_A304)
ADRENO_TARGET(a305, ADRENO_REV_A305)
ADRENO_TARGET(a305b, ADRENO_REV_A305B)
ADRENO_TARGET(a305c, ADRENO_REV_A305C)
ADRENO_TARGET(a306, ADRENO_REV_A306)
ADRENO_TARGET(a306a, ADRENO_REV_A306A)
ADRENO_TARGET(a310, ADRENO_REV_A310)
ADRENO_TARGET(a320, ADRENO_REV_A320)
ADRENO_TARGET(a330, ADRENO_REV_A330)

static inline int adreno_is_a330v2(struct adreno_device *adreno_dev)
{
	return ((ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A330) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) > 0));
}

static inline int adreno_is_a330v21(struct adreno_device *adreno_dev)
{
	return ((ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A330) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) > 0xF));
}

static inline int adreno_is_a4xx(struct adreno_device *adreno_dev)
{
	return ADRENO_GPUREV(adreno_dev) >= 400 &&
		ADRENO_GPUREV(adreno_dev) < 500;
}

ADRENO_TARGET(a405, ADRENO_REV_A405);

static inline int adreno_is_a405v2(struct adreno_device *adreno_dev)
{
	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A405) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0x10);
}

ADRENO_TARGET(a418, ADRENO_REV_A418)
ADRENO_TARGET(a420, ADRENO_REV_A420)
ADRENO_TARGET(a430, ADRENO_REV_A430)

static inline int adreno_is_a430v2(struct adreno_device *adreno_dev)
{
	return ((ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A430) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1));
}

static inline int adreno_is_a5xx(struct adreno_device *adreno_dev)
{
	return ADRENO_GPUREV(adreno_dev) >= 500 &&
		ADRENO_GPUREV(adreno_dev) < 600;
}

ADRENO_TARGET(a505, ADRENO_REV_A505)
ADRENO_TARGET(a506, ADRENO_REV_A506)
ADRENO_TARGET(a508, ADRENO_REV_A508)
ADRENO_TARGET(a510, ADRENO_REV_A510)
ADRENO_TARGET(a512, ADRENO_REV_A512)
ADRENO_TARGET(a530, ADRENO_REV_A530)
ADRENO_TARGET(a540, ADRENO_REV_A540)

static inline int adreno_is_a530v1(struct adreno_device *adreno_dev)
{
	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A530) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0);
}

static inline int adreno_is_a530v2(struct adreno_device *adreno_dev)
{
	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A530) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1);
}

static inline int adreno_is_a530v3(struct adreno_device *adreno_dev)
{
	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A530) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 2);
}

static inline int adreno_is_a505_or_a506(struct adreno_device *adreno_dev)
{
	return ADRENO_GPUREV(adreno_dev) >= 505 &&
		ADRENO_GPUREV(adreno_dev) <= 506;
}

static inline int adreno_is_a540v1(struct adreno_device *adreno_dev)
{
	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A540) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0);
}

static inline int adreno_is_a540v2(struct adreno_device *adreno_dev)
{
	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A540) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1);
}

static inline int adreno_is_a6xx(struct adreno_device *adreno_dev)
{
	return ADRENO_GPUREV(adreno_dev) >= 600 &&
		ADRENO_GPUREV(adreno_dev) < 700;
}

ADRENO_TARGET(a615, ADRENO_REV_A615)
ADRENO_TARGET(a630, ADRENO_REV_A630)

static inline int adreno_is_a630v1(struct adreno_device *adreno_dev)
{
	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A630) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0);
}

/*
 * adreno_checkreg_off() - Checks the validity of a register enum
 * @adreno_dev: Pointer to adreno device
 * @offset_name: The register enum that is checked
 */
static inline bool adreno_checkreg_off(struct adreno_device *adreno_dev,
		enum adreno_regs offset_name)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (offset_name >= ADRENO_REG_REGISTER_MAX ||
		gpudev->reg_offsets->offsets[offset_name] == ADRENO_REG_UNUSED)
		return false;

	/*
	 * GPU register programming is kept common as much as possible
	 * across the cores. Use ADRENO_REG_SKIP when certain register
	 * programming needs to be skipped for certain GPU cores.
	 * Example: Certain registers on a5xx like IB1_BASE are 64 bit.
	 * Common programming programs the 64 bit register but the upper
	 * 32 bits are skipped in a4xx and a3xx using ADRENO_REG_SKIP.
	 */
	if (gpudev->reg_offsets->offsets[offset_name] == ADRENO_REG_SKIP)
		return false;

	return true;
}

/*
 * adreno_readreg() - Read a register by getting its offset from the
 * offset array defined in gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum that is to be read
 * @val: Register value read is placed here
 */
static inline void adreno_readreg(struct adreno_device *adreno_dev,
		enum adreno_regs offset_name, unsigned int *val)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_regread(KGSL_DEVICE(adreno_dev),
			gpudev->reg_offsets->offsets[offset_name], val);
	else
		*val = 0;
}

/*
 * adreno_writereg() - Write a register by getting its offset from the
 * offset array defined in gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum that is to be written
 * @val: Value to write
 */
static inline void adreno_writereg(struct adreno_device *adreno_dev,
		enum adreno_regs offset_name, unsigned int val)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_regwrite(KGSL_DEVICE(adreno_dev),
			gpudev->reg_offsets->offsets[offset_name], val);
}
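
/*
 * Illustrative usage: updating the ringbuffer write pointer on any target
 * reduces to
 *
 *	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, wptr);
 *
 * and the per-target offset lookup (plus the ADRENO_REG_UNUSED/SKIP checks)
 * happens inside the helper.
 */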

/*
 * adreno_getreg() - Returns the offset value of a register from the
 * register offset array in the gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum whose offset is returned
 */
static inline unsigned int adreno_getreg(struct adreno_device *adreno_dev,
		enum adreno_regs offset_name)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (!adreno_checkreg_off(adreno_dev, offset_name))
		return ADRENO_REG_REGISTER_MAX;
	return gpudev->reg_offsets->offsets[offset_name];
}

/*
 * adreno_read_gmureg() - Read a GMU register by getting its offset from the
 * offset array defined in gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum that is to be read
 * @val: Register value read is placed here
 */
static inline void adreno_read_gmureg(struct adreno_device *adreno_dev,
		enum adreno_regs offset_name, unsigned int *val)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
			gpudev->reg_offsets->offsets[offset_name], val);
	else
		*val = 0;
}

/*
 * adreno_write_gmureg() - Write a GMU register by getting its offset from the
 * offset array defined in gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum that is to be written
 * @val: Value to write
 */
static inline void adreno_write_gmureg(struct adreno_device *adreno_dev,
		enum adreno_regs offset_name, unsigned int val)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_gmu_regwrite(KGSL_DEVICE(adreno_dev),
			gpudev->reg_offsets->offsets[offset_name], val);
}

/*
 * adreno_get_int() - Returns the offset value of an interrupt bit from
 * the interrupt bit array in the gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @bit_name: The interrupt bit enum whose bit is returned
 */
static inline unsigned int adreno_get_int(struct adreno_device *adreno_dev,
		enum adreno_int_bits bit_name)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (bit_name >= ADRENO_INT_BITS_MAX)
		return -ERANGE;

	return gpudev->int_bits[bit_name];
}

/**
 * adreno_gpu_fault() - Return the current state of the GPU
 * @adreno_dev: A pointer to the adreno_device to query
 *
 * Return 0 if there is no fault or positive with the last type of fault that
 * occurred
 */
static inline unsigned int adreno_gpu_fault(struct adreno_device *adreno_dev)
{
	/* make sure we're reading the latest value */
	smp_rmb();
	return atomic_read(&adreno_dev->dispatcher.fault);
}

/**
 * adreno_set_gpu_fault() - Set the current fault status of the GPU
 * @adreno_dev: A pointer to the adreno_device to set
 * @state: fault state to set
 *
 */
static inline void adreno_set_gpu_fault(struct adreno_device *adreno_dev,
	int state)
{
	/* only set the fault bit w/o overwriting other bits */
	atomic_add(state, &adreno_dev->dispatcher.fault);

	/* make sure other CPUs see the update */
	smp_wmb();
}


/**
 * adreno_clear_gpu_fault() - Clear the GPU fault register
 * @adreno_dev: A pointer to an adreno_device structure
 *
 * Clear the GPU fault status for the adreno device
 */

static inline void adreno_clear_gpu_fault(struct adreno_device *adreno_dev)
{
	atomic_set(&adreno_dev->dispatcher.fault, 0);

	/* make sure other CPUs see the update */
	smp_wmb();
}

/**
 * adreno_gpu_halt() - Return the GPU halt refcount
 * @adreno_dev: A pointer to the adreno_device
 */
static inline int adreno_gpu_halt(struct adreno_device *adreno_dev)
{
	/* make sure we're reading the latest value */
	smp_rmb();
	return atomic_read(&adreno_dev->halt);
}


/**
 * adreno_clear_gpu_halt() - Clear the GPU halt refcount
 * @adreno_dev: A pointer to the adreno_device
 */
static inline void adreno_clear_gpu_halt(struct adreno_device *adreno_dev)
{
	atomic_set(&adreno_dev->halt, 0);

	/* make sure other CPUs see the update */
	smp_wmb();
}

/**
 * adreno_get_gpu_halt() - Increment GPU halt refcount
 * @adreno_dev: A pointer to the adreno_device
 */
static inline void adreno_get_gpu_halt(struct adreno_device *adreno_dev)
{
	atomic_inc(&adreno_dev->halt);
}

/**
 * adreno_put_gpu_halt() - Decrement GPU halt refcount
 * @adreno_dev: A pointer to the adreno_device
 */
static inline void adreno_put_gpu_halt(struct adreno_device *adreno_dev)
{
	/* Make sure the refcount is good */
	int ret = atomic_dec_if_positive(&adreno_dev->halt);

	WARN(ret < 0, "GPU halt refcount unbalanced\n");
}


/*
 * adreno_vbif_start() - Program VBIF registers, called in device start
 * @adreno_dev: Pointer to device whose vbif data is to be programmed
 * @vbif_platforms: list of register-value pairs of vbif for a family
 * of adreno cores
 * @num_platforms: Number of platforms contained in vbif_platforms
 */
static inline void adreno_vbif_start(struct adreno_device *adreno_dev,
		const struct adreno_vbif_platform *vbif_platforms,
		int num_platforms)
{
	int i;
	const struct adreno_vbif_data *vbif = NULL;

	for (i = 0; i < num_platforms; i++) {
		if (vbif_platforms[i].devfunc(adreno_dev)) {
			vbif = vbif_platforms[i].vbif;
			break;
		}
	}

	while ((vbif != NULL) && (vbif->reg != 0)) {
		kgsl_regwrite(KGSL_DEVICE(adreno_dev), vbif->reg, vbif->val);
		vbif++;
	}
}
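
/*
 * The per-core tables passed in here are zero-terminated, e.g. (register
 * name and value are illustrative only):
 *
 *	static const struct adreno_vbif_data a530_vbif[] = {
 *		{ A530_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003 },
 *		{ 0, 0 },
 *	};
 *
 * The {0, 0} terminator is what stops the programming loop above.
 */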

/**
 * adreno_set_protected_registers() - Protect the specified range of registers
 * from being accessed by the GPU
 * @adreno_dev: pointer to the Adreno device
 * @index: Pointer to the index of the protect mode register to write to
 * @reg: Starting dword register to write
 * @mask_len: Size of the mask to protect (# of registers = 2 ** mask_len)
 *
 * Add the range of registers to the list of protected mode registers that will
 * cause an exception if the GPU accesses them. There are 16 available
 * protected mode registers. Index is used to specify which register to write
 * to - the intent is to call this function multiple times with the same index
 * pointer for each range and the registers will be magically programmed in
 * incremental fashion
 */
static inline void adreno_set_protected_registers(
		struct adreno_device *adreno_dev, unsigned int *index,
		unsigned int reg, int mask_len)
{
	unsigned int val;
	unsigned int base =
		adreno_getreg(adreno_dev, ADRENO_REG_CP_PROTECT_REG_0);
	unsigned int offset = *index;
	unsigned int max_slots = adreno_dev->gpucore->num_protected_regs ?
				adreno_dev->gpucore->num_protected_regs : 16;

	/* Do we have a free slot? */
	if (WARN(*index >= max_slots, "Protected register slots full: %d/%d\n",
					*index, max_slots))
		return;

	/*
	 * On A4XX targets with more than 16 protected mode registers
	 * the upper registers are not contiguous with the lower 16
	 * registers so we have to adjust the base and offset accordingly
	 */

	if (adreno_is_a4xx(adreno_dev) && *index >= 0x10) {
		base = A4XX_CP_PROTECT_REG_10;
		offset = *index - 0x10;
	}

	val = 0x60000000 | ((mask_len & 0x1F) << 24) | ((reg << 2) & 0xFFFFF);

	kgsl_regwrite(KGSL_DEVICE(adreno_dev), base + offset, val);
	*index = *index + 1;
}
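
/*
 * Typical usage (illustrative): a target's start code keeps one local slot
 * counter and calls this helper once per range, e.g.
 *
 *	unsigned int index = 0;
 *
 *	adreno_set_protected_registers(adreno_dev, &index, 0x0, 5);
 *	adreno_set_protected_registers(adreno_dev, &index, 0x200, 3);
 *
 * Each call consumes one protect slot and advances *index.
 */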

#ifdef CONFIG_DEBUG_FS
void adreno_debugfs_init(struct adreno_device *adreno_dev);
void adreno_context_debugfs_init(struct adreno_device *adreno_dev,
				struct adreno_context *ctx);
#else
static inline void adreno_debugfs_init(struct adreno_device *adreno_dev) { }
static inline void adreno_context_debugfs_init(struct adreno_device *adreno_dev,
				struct adreno_context *ctx)
	{ }
#endif

/**
 * adreno_compare_pm4_version() - Compare the PM4 microcode version
 * @adreno_dev: Pointer to the adreno_device struct
 * @version: Version number to compare against
 *
 * Compare the current version against the specified version and return -1 if
 * the current code is older, 0 if equal or 1 if newer.
 */
static inline int adreno_compare_pm4_version(struct adreno_device *adreno_dev,
		unsigned int version)
{
	if (adreno_dev->fw[ADRENO_FW_PM4].version == version)
		return 0;

	return (adreno_dev->fw[ADRENO_FW_PM4].version > version) ? 1 : -1;
}
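
/*
 * Example (illustrative sketch): gating an optional packet or behavior on a
 * minimum PM4 microcode version. The 0x00150000 version number is a
 * placeholder, not a real firmware version from this driver.
 */
static inline bool adreno_example_pm4_supports_feature(
		struct adreno_device *adreno_dev)
{
	return adreno_compare_pm4_version(adreno_dev, 0x00150000) >= 0;
}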

/**
 * adreno_compare_pfp_version() - Compare the PFP microcode version
 * @adreno_dev: Pointer to the adreno_device struct
 * @version: Version number to compare against
 *
 * Compare the current version against the specified version and return -1 if
 * the current code is older, 0 if equal or 1 if newer.
 */
static inline int adreno_compare_pfp_version(struct adreno_device *adreno_dev,
		unsigned int version)
{
	if (adreno_dev->fw[ADRENO_FW_PFP].version == version)
		return 0;

	return (adreno_dev->fw[ADRENO_FW_PFP].version > version) ? 1 : -1;
}

/**
 * adreno_bootstrap_ucode() - Check if microcode bootstrapping is supported
 * @adreno_dev: Pointer to the adreno device
 */
static inline int adreno_bootstrap_ucode(struct adreno_device *adreno_dev)
{
	return (ADRENO_FEATURE(adreno_dev, ADRENO_USE_BOOTSTRAP) &&
		adreno_compare_pfp_version(adreno_dev,
			adreno_dev->gpucore->pfp_bstrp_ver) >= 0) ? 1 : 0;
}

/**
 * adreno_in_preempt_state() - Check if preemption state is equal to given state
 * @adreno_dev: Device whose preemption state is checked
 * @state: State to compare against
 */
static inline bool adreno_in_preempt_state(struct adreno_device *adreno_dev,
			enum adreno_preempt_states state)
{
	return atomic_read(&adreno_dev->preempt.state) == state;
}

/**
 * adreno_set_preempt_state() - Set the specified preemption state
 * @adreno_dev: Device to change preemption state
 * @state: State to set
 */
static inline void adreno_set_preempt_state(struct adreno_device *adreno_dev,
		enum adreno_preempt_states state)
{
	/*
	 * atomic_set doesn't use barriers, so we need to do it ourselves. One
	 * before...
	 */
	smp_wmb();
	atomic_set(&adreno_dev->preempt.state, state);

	/* ... and one after */
	smp_wmb();
}
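
/*
 * Illustrative sketch (not the driver's preemption flow): check the current
 * state with adreno_in_preempt_state() and move to a new one with
 * adreno_set_preempt_state(). ADRENO_PREEMPT_NONE and ADRENO_PREEMPT_START
 * are assumed members of enum adreno_preempt_states; note that this
 * check-then-set pair is not atomic, so real transition code would need a
 * compare-and-swap to avoid races.
 */
static inline bool adreno_example_begin_preempt(
		struct adreno_device *adreno_dev)
{
	if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE))
		return false;

	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_START);
	return true;
}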

static inline bool adreno_is_preemption_enabled(
				struct adreno_device *adreno_dev)
{
	return test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
}

/**
 * adreno_ctx_get_rb() - Return the ringbuffer that a context should
 * use based on priority
 * @adreno_dev: The adreno device that context is using
 * @drawctxt: The context pointer
 */
static inline struct adreno_ringbuffer *adreno_ctx_get_rb(
		struct adreno_device *adreno_dev,
		struct adreno_context *drawctxt)
{
	struct kgsl_context *context;
	int level;

	if (!drawctxt)
		return NULL;

	context = &(drawctxt->base);

	/*
	 * If preemption is disabled then everybody needs to go on the same
	 * ringbuffer
	 */

	if (!adreno_is_preemption_enabled(adreno_dev))
		return &(adreno_dev->ringbuffers[0]);

	/*
	 * Math to convert the priority field in context structure to an RB ID.
	 * Divide up the context priority based on number of ringbuffer levels.
	 */
	level = context->priority / adreno_dev->num_ringbuffers;
	if (level < adreno_dev->num_ringbuffers)
		return &(adreno_dev->ringbuffers[level]);
	else
		return &(adreno_dev->ringbuffers[
			adreno_dev->num_ringbuffers - 1]);
}
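
/*
 * Worked example of the mapping above (illustrative): with 4 ringbuffers and
 * context priorities 0 (highest) through 15 (lowest), a context at priority 5
 * maps to level 5 / 4 = 1, i.e. ringbuffers[1]; any level that computes to
 * num_ringbuffers or more falls back to the last (lowest priority)
 * ringbuffer. The 0-15 priority range is an assumption made for this example,
 * not something enforced by this helper.
 */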

/**
 * adreno_compare_prio_level() - Compare two priority levels
 * @p1: First priority level
 * @p2: Second priority level
 *
 * Lower numeric values are higher priority. Returns a positive value if p1 is
 * higher priority than p2, 0 if the levels are equal, and a negative value
 * otherwise.
 */
static inline int adreno_compare_prio_level(int p1, int p2)
{
	return p2 - p1;
}

void adreno_readreg64(struct adreno_device *adreno_dev,
		enum adreno_regs lo, enum adreno_regs hi, uint64_t *val);

void adreno_writereg64(struct adreno_device *adreno_dev,
		enum adreno_regs lo, enum adreno_regs hi, uint64_t val);

unsigned int adreno_get_rptr(struct adreno_ringbuffer *rb);

static inline bool adreno_rb_empty(struct adreno_ringbuffer *rb)
{
	return (adreno_get_rptr(rb) == rb->wptr);
}

static inline bool adreno_soft_fault_detect(struct adreno_device *adreno_dev)
{
	return adreno_dev->fast_hang_detect &&
		!test_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv);
}

static inline bool adreno_long_ib_detect(struct adreno_device *adreno_dev)
{
	return adreno_dev->long_ib_detect &&
		!test_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv);
}

/*
 * adreno_support_64bit() - Return the ADRENO_64BIT feature flag on 64-bit
 * kernels; always return false on 32-bit kernels
 * @adreno_dev: The adreno device
 */
#if BITS_PER_LONG == 64
static inline bool adreno_support_64bit(struct adreno_device *adreno_dev)
{
	return ADRENO_FEATURE(adreno_dev, ADRENO_64BIT);
}
#else
static inline bool adreno_support_64bit(struct adreno_device *adreno_dev)
{
	return false;
}
#endif /* BITS_PER_LONG */

/*
 * adreno_ringbuffer_set_global() - Record the given global pagetable name in
 * ringbuffer 0's pagetable descriptor.
 */
static inline void adreno_ringbuffer_set_global(
		struct adreno_device *adreno_dev, int name)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_sharedmem_writel(device,
		&adreno_dev->ringbuffers[0].pagetable_desc,
		PT_INFO_OFFSET(current_global_ptname), name);
}

/*
 * adreno_ringbuffer_set_pagetable() - Record the pagetable's name, TTBR0 and
 * CONTEXTIDR values in the ringbuffer's pagetable descriptor, under the
 * preemption lock.
 */
static inline void adreno_ringbuffer_set_pagetable(struct adreno_ringbuffer *rb,
		struct kgsl_pagetable *pt)
{
	struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned long flags;

	spin_lock_irqsave(&rb->preempt_lock, flags);

	kgsl_sharedmem_writel(device, &rb->pagetable_desc,
		PT_INFO_OFFSET(current_rb_ptname), pt->name);

	kgsl_sharedmem_writeq(device, &rb->pagetable_desc,
		PT_INFO_OFFSET(ttbr0), kgsl_mmu_pagetable_get_ttbr0(pt));

	kgsl_sharedmem_writel(device, &rb->pagetable_desc,
		PT_INFO_OFFSET(contextidr),
		kgsl_mmu_pagetable_get_contextidr(pt));

	spin_unlock_irqrestore(&rb->preempt_lock, flags);
}

static inline unsigned int counter_delta(struct kgsl_device *device,
			unsigned int reg, unsigned int *counter)
{
	unsigned int val;
	unsigned int ret = 0;

	/* Read the value */
	kgsl_regread(device, reg, &val);

	/* Return 0 for the first read */
	if (*counter != 0) {
		if (val < *counter)
			ret = (0xFFFFFFFF - *counter) + val;
		else
			ret = val - *counter;
	}

	*counter = val;
	return ret;
}
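
/*
 * Example (illustrative sketch): a power-stats path keeps a shadow copy of
 * the raw counter and asks counter_delta() for the wrap-safe delta each
 * sampling period. The register name and stats structure below are
 * placeholders for this sketch only:
 *
 *	struct example_busy_stats {
 *		unsigned int ram_cycles;	// shadow of the raw counter
 *	};
 *
 *	static void example_update_stats(struct kgsl_device *device,
 *			struct example_busy_stats *stats)
 *	{
 *		unsigned int delta;
 *
 *		// 0 on the very first call, wrap-safe delta afterwards
 *		delta = counter_delta(device, EXAMPLE_BUSY_REG,
 *				&stats->ram_cycles);
 *		// ... accumulate delta into the busy statistics ...
 *	}
 */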

/*
 * adreno_perfcntr_active_oob_get() - Take an active count reference and, if
 * the target provides an oob_set handler, request the perfcounter
 * out-of-band vote. Drops the active count again if the OOB request fails.
 */
static inline int adreno_perfcntr_active_oob_get(
		struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int ret;

	ret = kgsl_active_count_get(KGSL_DEVICE(adreno_dev));
	if (ret)
		return ret;

	if (gpudev->oob_set) {
		ret = gpudev->oob_set(adreno_dev, OOB_PERFCNTR_SET_MASK,
				OOB_PERFCNTR_CHECK_MASK,
				OOB_PERFCNTR_CLEAR_MASK);
		if (ret)
			kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
	}

	return ret;
}

/*
 * adreno_perfcntr_active_oob_put() - Clear the perfcounter out-of-band vote
 * (if the target provides an oob_clear handler) and release the active count
 * reference taken by adreno_perfcntr_active_oob_get().
 */
static inline void adreno_perfcntr_active_oob_put(
		struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (gpudev->oob_clear)
		gpudev->oob_clear(adreno_dev, OOB_PERFCNTR_CLEAR_MASK);

	kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
}
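
/*
 * Example (illustrative sketch, not part of the driver): reading a counter
 * register while holding both an active count and the perfcounter OOB vote
 * so the hardware stays accessible. The register offset is supplied by the
 * caller; this helper exists only to show the get/put pairing.
 */
static inline int adreno_example_read_perfcounter(
		struct adreno_device *adreno_dev, unsigned int offset,
		unsigned int *val)
{
	int ret = adreno_perfcntr_active_oob_get(adreno_dev);

	if (ret)
		return ret;

	kgsl_regread(KGSL_DEVICE(adreno_dev), offset, val);

	adreno_perfcntr_active_oob_put(adreno_dev);
	return 0;
}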

/**
 * adreno_vbif_clear_pending_transactions() - Clear transactions in VBIF pipe
 * @device: Pointer to the device whose VBIF pipe is to be cleared
 */
static inline int adreno_vbif_clear_pending_transactions(
		struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
	unsigned int val;
	unsigned long wait_for_vbif;
	int ret = 0;

	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, mask);
	/* wait for the transactions to clear */
	wait_for_vbif = jiffies + msecs_to_jiffies(100);
	while (1) {
		adreno_readreg(adreno_dev,
			ADRENO_REG_VBIF_XIN_HALT_CTRL1, &val);
		if ((val & mask) == mask)
			break;
		if (time_after(jiffies, wait_for_vbif)) {
			KGSL_DRV_ERR(device,
				"Wait limit reached for VBIF XIN Halt\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
	return ret;
}

#endif /* __ADRENO_H */