blob: cc9401e95d825c3028847d83af96679e88331d47 [file] [log] [blame]
Archana Sriramd66ae7b2020-10-18 23:34:04 +05301/* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved.
Shrenuj Bansala419c792016-10-20 14:05:11 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#ifndef __KGSL_DEVICE_H
14#define __KGSL_DEVICE_H
15
16#include <linux/slab.h>
17#include <linux/idr.h>
18#include <linux/pm_qos.h>
19#include <linux/sched.h>
20
21#include "kgsl.h"
22#include "kgsl_mmu.h"
23#include "kgsl_pwrctrl.h"
24#include "kgsl_log.h"
25#include "kgsl_pwrscale.h"
26#include "kgsl_snapshot.h"
27#include "kgsl_sharedmem.h"
28#include "kgsl_drawobj.h"
Kyle Pieferb1027b02017-02-10 13:58:58 -080029#include "kgsl_gmu.h"
Shrenuj Bansala419c792016-10-20 14:05:11 -070030
31#define KGSL_IOCTL_FUNC(_cmd, _func) \
32 [_IOC_NR((_cmd))] = \
33 { .cmd = (_cmd), .func = (_func) }
34
35/*
36 * KGSL device state is initialized to INIT when platform_probe *
37 * successfully initialized the device. Once a device has been opened *
38 * (started) it becomes active. NAP implies that only low latency *
39 * resources (for now clocks on some platforms) are off. SLEEP implies *
40 * that the KGSL module believes a device is idle (has been inactive *
41 * past its timer) and all system resources are released. SUSPEND is *
42 * requested by the kernel and will be enforced upon all open devices. *
Kyle Piefere923b7a2017-03-28 17:31:48 -070043 * RESET indicates that GPU or GMU hang happens. KGSL is handling *
44 * snapshot or recover GPU from hang. *
Shrenuj Bansala419c792016-10-20 14:05:11 -070045 */
46
47#define KGSL_STATE_NONE 0x00000000
48#define KGSL_STATE_INIT 0x00000001
49#define KGSL_STATE_ACTIVE 0x00000002
50#define KGSL_STATE_NAP 0x00000004
51#define KGSL_STATE_SUSPEND 0x00000010
52#define KGSL_STATE_AWARE 0x00000020
53#define KGSL_STATE_SLUMBER 0x00000080
Kyle Piefere923b7a2017-03-28 17:31:48 -070054#define KGSL_STATE_RESET 0x00000100
Shrenuj Bansala419c792016-10-20 14:05:11 -070055
/**
 * enum kgsl_event_results - result codes passed to an event callback when the
 * event is retired or cancelled
 * @KGSL_EVENT_RETIRED: The timestamp associated with the event retired
 * successfully
 * @KGSL_EVENT_CANCELLED: The event was cancelled before the event was fired
 */
enum kgsl_event_results {
	KGSL_EVENT_RETIRED = 1,
	KGSL_EVENT_CANCELLED = 2,
};
67
68#define KGSL_FLAG_WAKE_ON_TOUCH BIT(0)
Harshdeep Dhatt67d23bf2019-09-09 11:05:48 -060069#define KGSL_FLAG_SPARSE BIT(1)
Shrenuj Bansala419c792016-10-20 14:05:11 -070070
71/*
72 * "list" of event types for ftrace symbolic magic
73 */
74
75#define KGSL_EVENT_TYPES \
76 { KGSL_EVENT_RETIRED, "retired" }, \
77 { KGSL_EVENT_CANCELLED, "cancelled" }
78
79#define KGSL_CONTEXT_FLAGS \
80 { KGSL_CONTEXT_NO_GMEM_ALLOC, "NO_GMEM_ALLOC" }, \
81 { KGSL_CONTEXT_PREAMBLE, "PREAMBLE" }, \
82 { KGSL_CONTEXT_TRASH_STATE, "TRASH_STATE" }, \
83 { KGSL_CONTEXT_CTX_SWITCH, "CTX_SWITCH" }, \
84 { KGSL_CONTEXT_PER_CONTEXT_TS, "PER_CONTEXT_TS" }, \
85 { KGSL_CONTEXT_USER_GENERATED_TS, "USER_TS" }, \
86 { KGSL_CONTEXT_NO_FAULT_TOLERANCE, "NO_FT" }, \
Hareesh Gunduccfb89b2017-04-14 18:36:20 +053087 { KGSL_CONTEXT_INVALIDATE_ON_FAULT, "INVALIDATE_ON_FAULT" }, \
Shrenuj Bansala419c792016-10-20 14:05:11 -070088 { KGSL_CONTEXT_PWR_CONSTRAINT, "PWR" }, \
Harshdeep Dhatt144c0092017-08-09 10:12:13 -060089 { KGSL_CONTEXT_SAVE_GMEM, "SAVE_GMEM" }, \
90 { KGSL_CONTEXT_IFH_NOP, "IFH_NOP" }, \
91 { KGSL_CONTEXT_SECURE, "SECURE" }, \
92 { KGSL_CONTEXT_NO_SNAPSHOT, "NO_SNAPSHOT" }, \
93 { KGSL_CONTEXT_SPARSE, "SPARSE" }
94
Shrenuj Bansala419c792016-10-20 14:05:11 -070095
96#define KGSL_CONTEXT_TYPES \
97 { KGSL_CONTEXT_TYPE_ANY, "ANY" }, \
98 { KGSL_CONTEXT_TYPE_GL, "GL" }, \
99 { KGSL_CONTEXT_TYPE_CL, "CL" }, \
100 { KGSL_CONTEXT_TYPE_C2D, "C2D" }, \
Rajesh Kemisettic05883a2018-09-17 11:34:08 +0530101 { KGSL_CONTEXT_TYPE_RS, "RS" }, \
102 { KGSL_CONTEXT_TYPE_VK, "VK" }
Shrenuj Bansala419c792016-10-20 14:05:11 -0700103
104#define KGSL_CONTEXT_ID(_context) \
105 ((_context != NULL) ? (_context)->id : KGSL_MEMSTORE_GLOBAL)
106
/* Allocate 600K for the snapshot static region */
#define KGSL_SNAPSHOT_MEMSIZE (600 * 1024)
Urvashi Agrawal53245652018-03-08 14:42:15 -0800109#define MAX_L3_LEVELS 3
Shrenuj Bansala419c792016-10-20 14:05:11 -0700110
111struct kgsl_device;
112struct platform_device;
113struct kgsl_device_private;
114struct kgsl_context;
115struct kgsl_power_stats;
116struct kgsl_event;
117struct kgsl_snapshot;
118
/*
 * struct kgsl_functable - per-target hook table. Each GPU target provides an
 * instance of this table; the core driver dispatches through it.
 */
struct kgsl_functable {
	/* Mandatory functions - these functions must be implemented
	 * by the client device. The driver will not check for a NULL
	 * pointer before calling the hook.
	 */
	/* Register access by word offset into the GPU register window */
	void (*regread)(struct kgsl_device *device,
		unsigned int offsetwords, unsigned int *value);
	void (*regwrite)(struct kgsl_device *device,
		unsigned int offsetwords, unsigned int value);
	int (*idle)(struct kgsl_device *device);
	bool (*isidle)(struct kgsl_device *device);
	int (*suspend_context)(struct kgsl_device *device);
	int (*init)(struct kgsl_device *device);
	int (*start)(struct kgsl_device *device, int priority);
	int (*stop)(struct kgsl_device *device);
	/* Register access by word offset into the GMU register window */
	void (*gmu_regread)(struct kgsl_device *device,
		unsigned int offsetwords, unsigned int *value);
	void (*gmu_regwrite)(struct kgsl_device *device,
		unsigned int offsetwords, unsigned int value);
	int (*getproperty)(struct kgsl_device *device,
		unsigned int type, void __user *value,
		size_t sizebytes);
	int (*getproperty_compat)(struct kgsl_device *device,
		unsigned int type, void __user *value,
		size_t sizebytes);
	int (*waittimestamp)(struct kgsl_device *device,
		struct kgsl_context *context, unsigned int timestamp,
		unsigned int msecs);
	int (*readtimestamp)(struct kgsl_device *device, void *priv,
		enum kgsl_timestamp_type type, unsigned int *timestamp);
	/* Queue @count draw objects for @context; returns the assigned
	 * timestamp through @timestamp
	 */
	int (*queue_cmds)(struct kgsl_device_private *dev_priv,
		struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
		uint32_t count, uint32_t *timestamp);
	void (*power_stats)(struct kgsl_device *device,
		struct kgsl_power_stats *stats);
	unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid);
	void (*snapshot)(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot, struct kgsl_context *context);
	irqreturn_t (*irq_handler)(struct kgsl_device *device);
	int (*drain)(struct kgsl_device *device);
	/* Allocate/free the per-open-file private structure */
	struct kgsl_device_private * (*device_private_create)(void);
	void (*device_private_destroy)(struct kgsl_device_private *dev_priv);
	/*
	 * Optional functions - these functions are not mandatory. The
	 * driver will check that the function pointer is not NULL before
	 * calling the hook
	 */
	struct kgsl_context *(*drawctxt_create)(struct kgsl_device_private *,
		uint32_t *flags);
	void (*drawctxt_detach)(struct kgsl_context *context);
	void (*drawctxt_destroy)(struct kgsl_context *context);
	void (*drawctxt_dump)(struct kgsl_device *device,
		struct kgsl_context *context);
	long (*ioctl)(struct kgsl_device_private *dev_priv,
		unsigned int cmd, unsigned long arg);
	long (*compat_ioctl)(struct kgsl_device_private *dev_priv,
		unsigned int cmd, unsigned long arg);
	int (*setproperty)(struct kgsl_device_private *dev_priv,
		unsigned int type, void __user *value,
		unsigned int sizebytes);
	int (*setproperty_compat)(struct kgsl_device_private *dev_priv,
		unsigned int type, void __user *value,
		unsigned int sizebytes);
	void (*drawctxt_sched)(struct kgsl_device *device,
		struct kgsl_context *context);
	void (*resume)(struct kgsl_device *device);
	int (*regulator_enable)(struct kgsl_device *);
	bool (*is_hw_collapsible)(struct kgsl_device *);
	void (*regulator_disable)(struct kgsl_device *);
	void (*pwrlevel_change_settings)(struct kgsl_device *device,
		unsigned int prelevel, unsigned int postlevel, bool post);
	void (*regulator_disable_poll)(struct kgsl_device *device);
	void (*clk_set_options)(struct kgsl_device *device,
		const char *name, struct clk *clk, bool on);
	void (*gpu_model)(struct kgsl_device *device, char *str,
		size_t bufsz);
	void (*stop_fault_timer)(struct kgsl_device *device);
	void (*dispatcher_halt)(struct kgsl_device *device);
	void (*dispatcher_unhalt)(struct kgsl_device *device);
};
199
/*
 * struct kgsl_ioctl - entry in an ioctl dispatch table
 * @cmd: ioctl command number handled by this entry
 * @func: handler invoked for @cmd (see kgsl_ioctl_helper())
 */
struct kgsl_ioctl {
	unsigned int cmd;
	long (*func)(struct kgsl_device_private *, unsigned int, void *);
};
204
205long kgsl_ioctl_helper(struct file *filep, unsigned int cmd, unsigned long arg,
206 const struct kgsl_ioctl *cmds, int len);
207
208/* Flag to mark the memobj_node as a preamble */
209#define MEMOBJ_PREAMBLE BIT(0)
/* Flag to mark that the memobj_node should not go to the hardware */
211#define MEMOBJ_SKIP BIT(1)
212
/**
 * struct kgsl_memobj_node - Memory object descriptor
 * @node: Local list node for the object
 * @id: GPU memory ID for the object
 * @offset: Offset within the object
 * @gpuaddr: GPU address for the object
 * @size: Size of the object in bytes
 * @flags: External flags passed by the user
 * @priv: Internal flags set by the driver (MEMOBJ_PREAMBLE, MEMOBJ_SKIP)
 */
struct kgsl_memobj_node {
	struct list_head node;
	unsigned int id;
	uint64_t offset;
	uint64_t gpuaddr;
	uint64_t size;
	unsigned long flags;
	unsigned long priv;
};
231
Tarun Karra2b8b3632016-11-14 16:38:27 -0800232/**
233 * struct kgsl_sparseobj_node - Sparse object descriptor
234 * @node: Local list node for the sparse cmdbatch
235 * @virt_id: Virtual ID to bind/unbind
236 * @obj: struct kgsl_sparse_binding_object
237 */
238struct kgsl_sparseobj_node {
239 struct list_head node;
240 unsigned int virt_id;
241 struct kgsl_sparse_binding_object obj;
242};
243
Shrenuj Bansala419c792016-10-20 14:05:11 -0700244struct kgsl_device {
245 struct device *dev;
246 const char *name;
247 unsigned int ver_major;
248 unsigned int ver_minor;
249 uint32_t flags;
250 enum kgsl_deviceid id;
251
252 /* Starting physical address for GPU registers */
253 unsigned long reg_phys;
254
255 /* Starting Kernel virtual address for GPU registers */
256 void __iomem *reg_virt;
257
258 /* Total memory size for all GPU registers */
259 unsigned int reg_len;
260
261 /* Kernel virtual address for GPU shader memory */
262 void __iomem *shader_mem_virt;
263
264 /* Starting physical address for GPU shader memory */
265 unsigned long shader_mem_phys;
266
267 /* GPU shader memory size */
268 unsigned int shader_mem_len;
269 struct kgsl_memdesc memstore;
270 struct kgsl_memdesc scratch;
271 const char *iomemname;
272 const char *shadermemname;
273
274 struct kgsl_mmu mmu;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800275 struct gmu_device gmu;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700276 struct completion hwaccess_gate;
277 struct completion halt_gate;
278 const struct kgsl_functable *ftbl;
279 struct work_struct idle_check_ws;
280 struct timer_list idle_timer;
281 struct kgsl_pwrctrl pwrctrl;
282 int open_count;
283
Hareesh Gundu2eb74d72017-06-07 14:50:15 +0530284 /* For GPU inline submission */
285 uint32_t submit_now;
286 spinlock_t submit_lock;
287 bool slumber;
288
Shrenuj Bansala419c792016-10-20 14:05:11 -0700289 struct mutex mutex;
290 uint32_t state;
291 uint32_t requested_state;
292
293 atomic_t active_cnt;
294
295 wait_queue_head_t wait_queue;
296 wait_queue_head_t active_cnt_wq;
297 struct platform_device *pdev;
298 struct dentry *d_debugfs;
299 struct idr context_idr;
300 rwlock_t context_lock;
301
302 struct {
303 void *ptr;
304 size_t size;
305 } snapshot_memory;
306
307 struct kgsl_snapshot *snapshot;
308
309 u32 snapshot_faultcount; /* Total number of faults since boot */
310 bool force_panic; /* Force panic after snapshot dump */
Kyle Pieferf698a7f2018-01-10 10:19:03 -0800311 bool prioritize_unrecoverable; /* Overwrite with new GMU snapshots */
Shrenuj Bansala419c792016-10-20 14:05:11 -0700312
313 /* Use CP Crash dumper to get GPU snapshot*/
314 bool snapshot_crashdumper;
Harshdeep Dhatt134f7af2017-05-17 13:54:41 -0600315 /* Use HOST side register reads to get GPU snapshot*/
316 bool snapshot_legacy;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700317
318 struct kobject snapshot_kobj;
319
320 struct kobject ppd_kobj;
321
322 /* Logging levels */
323 int cmd_log;
324 int ctxt_log;
325 int drv_log;
326 int mem_log;
327 int pwr_log;
328 struct kgsl_pwrscale pwrscale;
329
330 int reset_counter; /* Track how many GPU core resets have occurred */
331 struct workqueue_struct *events_wq;
332
333 struct device *busmondev; /* pseudo dev for GPU BW voting governor */
334
335 /* Number of active contexts seen globally for this device */
336 int active_context_count;
337 struct kobject *gpu_sysfs_kobj;
Urvashi Agrawal53245652018-03-08 14:42:15 -0800338 struct clk *l3_clk;
339 unsigned int l3_freq[MAX_L3_LEVELS];
340 unsigned int num_l3_pwrlevels;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700341};
342
343#define KGSL_MMU_DEVICE(_mmu) \
344 container_of((_mmu), struct kgsl_device, mmu)
345
346#define KGSL_DEVICE_COMMON_INIT(_dev) \
347 .hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
348 .halt_gate = COMPLETION_INITIALIZER((_dev).halt_gate),\
349 .idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
350 kgsl_idle_check),\
351 .context_idr = IDR_INIT((_dev).context_idr),\
352 .wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
353 .active_cnt_wq = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).active_cnt_wq),\
354 .mutex = __MUTEX_INITIALIZER((_dev).mutex),\
355 .state = KGSL_STATE_NONE,\
356 .ver_major = DRIVER_VERSION_MAJOR,\
357 .ver_minor = DRIVER_VERSION_MINOR
358
359
/**
 * enum bits for struct kgsl_context.priv
 * @KGSL_CONTEXT_PRIV_SUBMITTED - The context has submitted commands to gpu.
 * @KGSL_CONTEXT_PRIV_DETACHED - The context has been destroyed by userspace
 *	and is no longer using the gpu.
 * @KGSL_CONTEXT_PRIV_INVALID - The context has been destroyed by the kernel
 *	because it caused a GPU fault.
 * @KGSL_CONTEXT_PRIV_PAGEFAULT - The context has caused a page fault.
 * @KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC - this value and higher values are
 *	reserved for devices specific use.
 */
enum kgsl_context_priv {
	KGSL_CONTEXT_PRIV_SUBMITTED = 0,
	KGSL_CONTEXT_PRIV_DETACHED,
	KGSL_CONTEXT_PRIV_INVALID,
	KGSL_CONTEXT_PRIV_PAGEFAULT,
	KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC = 16,
};
378
379struct kgsl_process_private;
380
/**
 * struct kgsl_context - The context fields that are valid for a user defined
 * context
 * @refcount: kref object for reference counting the context
 * @id: integer identifier for the context
 * @priority: The context's priority to submit commands to GPU
 * @tid: task that created this context.
 * @dev_priv: pointer to the owning device instance
 * @proc_priv: pointer to process private, the process that allocated the
 * context
 * @priv: in-kernel context flags, use KGSL_CONTEXT_PRIV_* bit numbers
 * @device: pointer to the KGSL device that owns this context
 * @reset_status: status indication whether a gpu reset occurred and whether
 * this context was responsible for causing it
 * @ktimeline: sync timeline used to create fences that can be signaled when a
 * sync_pt timestamp expires
 * @events: A kgsl_event_group for this context - contains the list of GPU
 * events
 * @flags: flags from userspace controlling the behavior of this context
 * @pwr_constraint: power constraint from userspace for this context
 * @l3_pwr_constraint: L3 constraint from userspace for this context
 * @fault_count: number of times gpu hanged in last _context_throttle_time ms
 * @fault_time: time of the first gpu hang in last _context_throttle_time ms
 * @user_ctxt_record: memory descriptor used by CP to save/restore VPC data
 * across preemption
 */
struct kgsl_context {
	struct kref refcount;
	uint32_t id;
	uint32_t priority;
	pid_t tid;
	struct kgsl_device_private *dev_priv;
	struct kgsl_process_private *proc_priv;
	unsigned long priv;
	struct kgsl_device *device;
	unsigned int reset_status;
	struct kgsl_sync_timeline *ktimeline;
	struct kgsl_event_group events;
	unsigned int flags;
	struct kgsl_pwr_constraint pwr_constraint;
	struct kgsl_pwr_constraint l3_pwr_constraint;
	unsigned int fault_count;
	unsigned long fault_time;
	struct kgsl_mem_entry *user_ctxt_record;
};
424
425#define _context_comm(_c) \
426 (((_c) && (_c)->proc_priv) ? (_c)->proc_priv->comm : "unknown")
427
428/*
429 * Print log messages with the context process name/pid:
430 * [...] kgsl kgsl-3d0: kgsl-api-test[22182]:
431 */
432
433#define pr_context(_d, _c, fmt, args...) \
434 dev_err((_d)->dev, "%s[%d]: " fmt, \
435 _context_comm((_c)), \
Archana Sriramd66ae7b2020-10-18 23:34:04 +0530436 pid_nr((_c)->proc_priv->pid), ##args)
Shrenuj Bansala419c792016-10-20 14:05:11 -0700437
/**
 * struct kgsl_process_private - Private structure for a KGSL process (across
 * all devices)
 * @priv: Internal flags, use KGSL_PROCESS_* values
 * @pid: Identification structure for the task owner of the process
 * @comm: task name of the process
 * @mem_lock: Spinlock to protect the process memory lists
 * @refcount: kref object for reference counting the process
 * @mem_idr: Iterator for assigning IDs to memory allocations
 * @pagetable: Pointer to the pagetable owned by this process
 * @list: list node for this process (presumably the driver-global process
 *	list — confirm against the list head user in kgsl core)
 * @kobj: Pointer to a kobj for the sysfs directory for this process
 * @debug_root: Pointer to the debugfs root for this process
 * @stats: Memory allocation statistics for this process
 * @gpumem_mapped: KGSL memory mapped in the process address space
 * @syncsource_idr: sync sources created by this process
 * @syncsource_lock: Spinlock to protect the syncsource idr
 * @fd_count: Counter for the number of FDs for this process
 * @ctxt_count: Count for the number of contexts for this process
 * @ctxt_count_lock: Spinlock to protect ctxt_count
 */
struct kgsl_process_private {
	unsigned long priv;
	struct pid *pid;
	char comm[TASK_COMM_LEN];
	spinlock_t mem_lock;
	struct kref refcount;
	struct idr mem_idr;
	struct kgsl_pagetable *pagetable;
	struct list_head list;
	struct kobject kobj;
	struct dentry *debug_root;
	struct {
		uint64_t cur;
		uint64_t max;
	} stats[KGSL_MEM_ENTRY_MAX];
	uint64_t gpumem_mapped;
	struct idr syncsource_idr;
	spinlock_t syncsource_lock;
	int fd_count;
	atomic_t ctxt_count;
	spinlock_t ctxt_count_lock;
};
480
/**
 * enum kgsl_process_priv_flags - Private flags for kgsl_process_private
 * @KGSL_PROCESS_INIT: Set if the process structure has been set up
 */
enum kgsl_process_priv_flags {
	/* Bit number used in the kgsl_process_private.priv bitmask */
	KGSL_PROCESS_INIT = 0,
};
488
/**
 * struct kgsl_device_private - per-open-file handle binding a process to a
 * device
 * @device: the KGSL device this handle belongs to
 * @process_priv: the process-wide private data for the owner
 */
struct kgsl_device_private {
	struct kgsl_device *device;
	struct kgsl_process_private *process_priv;
};
493
/**
 * struct kgsl_snapshot - details for a specific snapshot instance
 * @ib1base: Active IB1 base address at the time of fault
 * @ib2base: Active IB2 base address at the time of fault
 * @ib1size: Number of DWORDS pending in IB1 at the time of fault
 * @ib2size: Number of DWORDS pending in IB2 at the time of fault
 * @ib1dumped: Active IB1 dump status to snapshot binary
 * @ib2dumped: Active IB2 dump status to snapshot binary
 * @start: Pointer to the start of the static snapshot region
 * @size: Size of the current snapshot instance
 * @ptr: Pointer to the next block of memory to write to during snapshotting
 * @remain: Bytes left in the snapshot region
 * @timestamp: Timestamp of the snapshot instance (in seconds since boot)
 * @mempool: Pointer to the memory pool for storing memory objects
 * @mempool_size: Size of the memory pool
 * @obj_list: List of frozen GPU buffers that are waiting to be dumped.
 * @cp_list: List of IB's to be dumped.
 * @work: worker to dump the frozen memory
 * @dump_gate: completion gate signaled by worker when it is finished.
 * @process: the process that caused the hang, if known.
 * @sysfs_read: Count of current reads via sysfs
 * @first_read: True until the snapshot read is started
 * @gmu_fault: Snapshot collected when GMU fault happened
 * @recovered: True if GPU was recovered after previous snapshot
 */
struct kgsl_snapshot {
	uint64_t ib1base;
	uint64_t ib2base;
	unsigned int ib1size;
	unsigned int ib2size;
	bool ib1dumped;
	bool ib2dumped;
	u8 *start;
	size_t size;
	u8 *ptr;
	size_t remain;
	unsigned long timestamp;
	u8 *mempool;
	size_t mempool_size;
	struct list_head obj_list;
	struct list_head cp_list;
	struct work_struct work;
	struct completion dump_gate;
	struct kgsl_process_private *process;
	unsigned int sysfs_read;
	bool first_read;
	bool gmu_fault;
	bool recovered;
};
543
/**
 * struct kgsl_snapshot_object - GPU memory in the snapshot
 * @gpuaddr: The GPU address identified during snapshot
 * @size: The buffer size identified during snapshot
 * @offset: offset from start of the allocated kgsl_mem_entry
 * @type: SNAPSHOT_OBJ_TYPE_* identifier.
 * @entry: the reference counted memory entry for this buffer
 * @node: node for kgsl_snapshot.obj_list
 */
struct kgsl_snapshot_object {
	uint64_t gpuaddr;
	uint64_t size;
	uint64_t offset;
	int type;
	struct kgsl_mem_entry *entry;
	struct list_head node;
};
561
562struct kgsl_device *kgsl_get_device(int dev_idx);
563
564static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
565 unsigned int type, uint64_t size)
566{
567 priv->stats[type].cur += size;
568 if (priv->stats[type].max < priv->stats[type].cur)
569 priv->stats[type].max = priv->stats[type].cur;
570}
571
Lynus Vazd37f1d82017-05-24 16:39:15 +0530572static inline bool kgsl_is_register_offset(struct kgsl_device *device,
573 unsigned int offsetwords)
574{
575 return ((offsetwords * sizeof(uint32_t)) < device->reg_len);
576}
577
578static inline bool kgsl_is_gmu_offset(struct kgsl_device *device,
579 unsigned int offsetwords)
580{
581 struct gmu_device *gmu = &device->gmu;
582
583 return (gmu->pdev &&
584 (offsetwords >= gmu->gmu2gpu_offset) &&
585 ((offsetwords - gmu->gmu2gpu_offset) * sizeof(uint32_t) <
586 gmu->reg_len));
587}
588
Shrenuj Bansala419c792016-10-20 14:05:11 -0700589static inline void kgsl_regread(struct kgsl_device *device,
590 unsigned int offsetwords,
591 unsigned int *value)
592{
Lynus Vazd37f1d82017-05-24 16:39:15 +0530593 if (kgsl_is_register_offset(device, offsetwords))
594 device->ftbl->regread(device, offsetwords, value);
595 else if (device->ftbl->gmu_regread &&
596 kgsl_is_gmu_offset(device, offsetwords))
597 device->ftbl->gmu_regread(device, offsetwords, value);
598 else {
599 WARN(1, "Out of bounds register read: 0x%x\n", offsetwords);
600 *value = 0;
601 }
Shrenuj Bansala419c792016-10-20 14:05:11 -0700602}
603
604static inline void kgsl_regwrite(struct kgsl_device *device,
605 unsigned int offsetwords,
606 unsigned int value)
607{
Lynus Vazd37f1d82017-05-24 16:39:15 +0530608 if (kgsl_is_register_offset(device, offsetwords))
609 device->ftbl->regwrite(device, offsetwords, value);
610 else if (device->ftbl->gmu_regwrite &&
611 kgsl_is_gmu_offset(device, offsetwords))
612 device->ftbl->gmu_regwrite(device, offsetwords, value);
613 else
614 WARN(1, "Out of bounds register write: 0x%x\n", offsetwords);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700615}
616
Kyle Pieferb1027b02017-02-10 13:58:58 -0800617static inline void kgsl_gmu_regread(struct kgsl_device *device,
618 unsigned int offsetwords,
619 unsigned int *value)
620{
621 if (device->ftbl->gmu_regread)
622 device->ftbl->gmu_regread(device, offsetwords, value);
623 else
Carter Cooper83454bf2017-03-20 11:26:04 -0600624 *value = 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800625}
626
627static inline void kgsl_gmu_regwrite(struct kgsl_device *device,
628 unsigned int offsetwords,
629 unsigned int value)
630{
631 if (device->ftbl->gmu_regwrite)
632 device->ftbl->gmu_regwrite(device, offsetwords, value);
633}
634
Shrenuj Bansala419c792016-10-20 14:05:11 -0700635static inline void kgsl_regrmw(struct kgsl_device *device,
636 unsigned int offsetwords,
637 unsigned int mask, unsigned int bits)
638{
639 unsigned int val = 0;
640
Lynus Vazd37f1d82017-05-24 16:39:15 +0530641 kgsl_regread(device, offsetwords, &val);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700642 val &= ~mask;
Lynus Vazd37f1d82017-05-24 16:39:15 +0530643 kgsl_regwrite(device, offsetwords, val | bits);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700644}
645
Kyle Pieferdc0706c2017-04-13 13:17:50 -0700646static inline void kgsl_gmu_regrmw(struct kgsl_device *device,
647 unsigned int offsetwords,
648 unsigned int mask, unsigned int bits)
649{
650 unsigned int val = 0;
651
652 kgsl_gmu_regread(device, offsetwords, &val);
653 val &= ~mask;
654 kgsl_gmu_regwrite(device, offsetwords, val | bits);
655}
656
Shrenuj Bansala419c792016-10-20 14:05:11 -0700657static inline int kgsl_idle(struct kgsl_device *device)
658{
659 return device->ftbl->idle(device);
660}
661
/* kgsl_gpuid() - query the GPU id via the target's mandatory ->gpuid hook */
static inline unsigned int kgsl_gpuid(struct kgsl_device *device,
		unsigned int *chipid)
{
	return device->ftbl->gpuid(device, chipid);
}
667
668static inline int kgsl_create_device_sysfs_files(struct device *root,
669 const struct device_attribute **list)
670{
671 int ret = 0, i;
672
673 for (i = 0; list[i] != NULL; i++)
674 ret |= device_create_file(root, list[i]);
675 return ret;
676}
677
678static inline void kgsl_remove_device_sysfs_files(struct device *root,
679 const struct device_attribute **list)
680{
681 int i;
682
683 for (i = 0; list[i] != NULL; i++)
684 device_remove_file(root, list[i]);
685}
686
687static inline struct kgsl_device *kgsl_device_from_dev(struct device *dev)
688{
689 int i;
690
691 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
692 if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->dev == dev)
693 return kgsl_driver.devp[i];
694 }
695
696 return NULL;
697}
698
699static inline int kgsl_state_is_awake(struct kgsl_device *device)
700{
George Shen6927d8f2017-07-19 11:38:10 -0700701 struct gmu_device *gmu = &device->gmu;
702
Shrenuj Bansala419c792016-10-20 14:05:11 -0700703 if (device->state == KGSL_STATE_ACTIVE ||
704 device->state == KGSL_STATE_AWARE)
705 return true;
George Shen6927d8f2017-07-19 11:38:10 -0700706 else if (kgsl_gmu_isenabled(device) &&
707 test_bit(GMU_CLK_ON, &gmu->flags))
708 return true;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700709 else
710 return false;
711}
712
713int kgsl_readtimestamp(struct kgsl_device *device, void *priv,
714 enum kgsl_timestamp_type type, unsigned int *timestamp);
715
716int kgsl_check_timestamp(struct kgsl_device *device,
717 struct kgsl_context *context, unsigned int timestamp);
718
719int kgsl_device_platform_probe(struct kgsl_device *device);
720
721void kgsl_device_platform_remove(struct kgsl_device *device);
722
723const char *kgsl_pwrstate_to_str(unsigned int state);
724
725int kgsl_device_snapshot_init(struct kgsl_device *device);
726void kgsl_device_snapshot(struct kgsl_device *device,
Lynus Vaz43695aa2017-09-01 21:55:23 +0530727 struct kgsl_context *context, bool gmu_fault);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700728void kgsl_device_snapshot_close(struct kgsl_device *device);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700729
730void kgsl_events_init(void);
731void kgsl_events_exit(void);
732
Harshdeep Dhatt2e42f122017-05-31 17:27:19 -0600733void kgsl_context_detach(struct kgsl_context *context);
734
Shrenuj Bansala419c792016-10-20 14:05:11 -0700735void kgsl_del_event_group(struct kgsl_event_group *group);
736
737void kgsl_add_event_group(struct kgsl_event_group *group,
738 struct kgsl_context *context, const char *name,
739 readtimestamp_func readtimestamp, void *priv);
740
741void kgsl_cancel_events_timestamp(struct kgsl_device *device,
742 struct kgsl_event_group *group, unsigned int timestamp);
743void kgsl_cancel_events(struct kgsl_device *device,
744 struct kgsl_event_group *group);
745void kgsl_cancel_event(struct kgsl_device *device,
746 struct kgsl_event_group *group, unsigned int timestamp,
747 kgsl_event_func func, void *priv);
748bool kgsl_event_pending(struct kgsl_device *device,
749 struct kgsl_event_group *group, unsigned int timestamp,
750 kgsl_event_func func, void *priv);
751int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
752 unsigned int timestamp, kgsl_event_func func, void *priv);
753void kgsl_process_event_group(struct kgsl_device *device,
754 struct kgsl_event_group *group);
755void kgsl_flush_event_group(struct kgsl_device *device,
756 struct kgsl_event_group *group);
757void kgsl_process_event_groups(struct kgsl_device *device);
758
759void kgsl_context_destroy(struct kref *kref);
760
761int kgsl_context_init(struct kgsl_device_private *dev_priv,
762 struct kgsl_context *context);
763
764void kgsl_context_dump(struct kgsl_context *context);
765
766int kgsl_memfree_find_entry(pid_t ptname, uint64_t *gpuaddr,
767 uint64_t *size, uint64_t *flags, pid_t *pid);
768
769long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
770
771long kgsl_ioctl_copy_in(unsigned int kernel_cmd, unsigned int user_cmd,
772 unsigned long arg, unsigned char *ptr);
773
774long kgsl_ioctl_copy_out(unsigned int kernel_cmd, unsigned int user_cmd,
775 unsigned long arg, unsigned char *ptr);
776
Tarun Karra2b8b3632016-11-14 16:38:27 -0800777void kgsl_sparse_bind(struct kgsl_process_private *private,
778 struct kgsl_drawobj_sparse *sparse);
779
Shrenuj Bansala419c792016-10-20 14:05:11 -0700780/**
781 * kgsl_context_put() - Release context reference count
782 * @context: Pointer to the KGSL context to be released
783 *
784 * Reduce the reference count on a KGSL context and destroy it if it is no
785 * longer needed
786 */
787static inline void
788kgsl_context_put(struct kgsl_context *context)
789{
790 if (context)
791 kref_put(&context->refcount, kgsl_context_destroy);
792}
793
794/**
795 * kgsl_context_detached() - check if a context is detached
796 * @context: the context
797 *
798 * Check if a context has been destroyed by userspace and is only waiting
799 * for reference counts to go away. This check is used to weed out
800 * contexts that shouldn't use the gpu so NULL is considered detached.
801 */
802static inline bool kgsl_context_detached(struct kgsl_context *context)
803{
804 return (context == NULL || test_bit(KGSL_CONTEXT_PRIV_DETACHED,
805 &context->priv));
806}
807
808/**
809 * kgsl_context_invalid() - check if a context is invalid
810 * @context: the context
811 *
812 * Check if a context has been invalidated by the kernel and may no
813 * longer use the GPU.
814 */
815static inline bool kgsl_context_invalid(struct kgsl_context *context)
816{
817 return (context == NULL || test_bit(KGSL_CONTEXT_PRIV_INVALID,
818 &context->priv));
819}
820
821
822/**
823 * kgsl_context_get() - get a pointer to a KGSL context
824 * @device: Pointer to the KGSL device that owns the context
825 * @id: Context ID
826 *
827 * Find the context associated with the given ID number, increase the reference
828 * count on it and return it. The caller must make sure that this call is
829 * paired with a kgsl_context_put. This function is for internal use because it
830 * doesn't validate the ownership of the context with the calling process - use
831 * kgsl_context_get_owner for that
832 */
833static inline struct kgsl_context *kgsl_context_get(struct kgsl_device *device,
834 uint32_t id)
835{
836 int result = 0;
837 struct kgsl_context *context = NULL;
838
839 read_lock(&device->context_lock);
840
841 context = idr_find(&device->context_idr, id);
842
843 /* Don't return a context that has been detached */
844 if (kgsl_context_detached(context))
845 context = NULL;
846 else
847 result = kref_get_unless_zero(&context->refcount);
848
849 read_unlock(&device->context_lock);
850
851 if (!result)
852 return NULL;
853 return context;
854}
855
856/**
857 * _kgsl_context_get() - lightweight function to just increment the ref count
858 * @context: Pointer to the KGSL context
859 *
860 * Get a reference to the specified KGSL context structure. This is a
861 * lightweight way to just increase the refcount on a known context rather than
862 * walking through kgsl_context_get and searching the iterator
863 */
864static inline int _kgsl_context_get(struct kgsl_context *context)
865{
866 int ret = 0;
867
868 if (context)
869 ret = kref_get_unless_zero(&context->refcount);
870
871 return ret;
872}
873
874/**
875 * kgsl_context_get_owner() - get a pointer to a KGSL context in a specific
876 * process
877 * @dev_priv: Pointer to the process struct
878 * @id: Context ID to return
879 *
880 * Find the context associated with the given ID number, increase the reference
881 * count on it and return it. The caller must make sure that this call is
882 * paired with a kgsl_context_put. This function validates that the context id
883 * given is owned by the dev_priv instancet that is passed in. See
884 * kgsl_context_get for the internal version that doesn't do the check
885 */
886static inline struct kgsl_context *kgsl_context_get_owner(
887 struct kgsl_device_private *dev_priv, uint32_t id)
888{
889 struct kgsl_context *context;
890
891 context = kgsl_context_get(dev_priv->device, id);
892
893 /* Verify that the context belongs to current calling fd. */
894 if (context != NULL && context->dev_priv != dev_priv) {
895 kgsl_context_put(context);
896 return NULL;
897 }
898
899 return context;
900}
901
902/**
903 * kgsl_process_private_get() - increment the refcount on a
904 * kgsl_process_private struct
905 * @process: Pointer to the KGSL process_private
906 *
907 * Returns 0 if the structure is invalid and a reference count could not be
908 * obtained, nonzero otherwise.
909 */
910static inline int kgsl_process_private_get(struct kgsl_process_private *process)
911{
912 int ret = 0;
913
914 if (process != NULL)
915 ret = kref_get_unless_zero(&process->refcount);
916 return ret;
917}
918
/* Drop a reference taken with kgsl_process_private_get() */
void kgsl_process_private_put(struct kgsl_process_private *private);


/* Find the process private structure matching @pid */
struct kgsl_process_private *kgsl_process_private_find(pid_t pid);
923
924/**
925 * kgsl_property_read_u32() - Read a u32 property from the device tree
926 * @device: Pointer to the KGSL device
927 * @prop: String name of the property to query
928 * @ptr: Pointer to the variable to store the property
929 */
930static inline int kgsl_property_read_u32(struct kgsl_device *device,
931 const char *prop, unsigned int *ptr)
932{
933 return of_property_read_u32(device->pdev->dev.of_node, prop, ptr);
934}
935
936/**
937 * kgsl_sysfs_store() - parse a string from a sysfs store function
938 * @buf: Incoming string to parse
939 * @ptr: Pointer to an unsigned int to store the value
940 */
941static inline int kgsl_sysfs_store(const char *buf, unsigned int *ptr)
942{
943 unsigned int val;
944 int rc;
945
946 rc = kstrtou32(buf, 0, &val);
947 if (rc)
948 return rc;
949
950 if (ptr)
951 *ptr = val;
952
953 return 0;
954}
955
/*
 * A helper macro to print out "not enough memory functions" - this
 * makes it easy to standardize the messages as well as cut down on
 * the number of strings in the binary
 */
/* @_d: kgsl_device pointer, @_s: name string of the snapshot section */
#define SNAPSHOT_ERR_NOMEM(_d, _s) \
	KGSL_DRV_ERR((_d), \
	"snapshot: not enough snapshot memory for section %s\n", (_s))
964
/**
 * struct kgsl_snapshot_registers - list of registers to snapshot
 * @regs: Pointer to an array of register ranges
 * @count: Number of entries in the array
 *
 * NOTE(review): @regs appears to hold start/end register-offset pairs -
 * confirm against the consumers of kgsl_snapshot_dump_registers().
 */
struct kgsl_snapshot_registers {
	const unsigned int *regs;
	unsigned int count;
};
974
/* Snapshot helpers (implemented in kgsl_snapshot.c) */

/* Section callback that dumps a register list into @buf; returns bytes
 * written (0 if @remain is too small - see SNAPSHOT_ERR_NOMEM) */
size_t kgsl_snapshot_dump_registers(struct kgsl_device *device, u8 *buf,
	size_t remain, void *priv);

/* Dump @count values read through an index/data register pair */
void kgsl_snapshot_indexed_registers(struct kgsl_device *device,
		struct kgsl_snapshot *snapshot, unsigned int index,
		unsigned int data, unsigned int start, unsigned int count);

/* Queue the GPU object at @gpuaddr in @process for inclusion in the
 * snapshot; @type is the snapshot object type tag */
int kgsl_snapshot_get_object(struct kgsl_snapshot *snapshot,
		struct kgsl_process_private *process, uint64_t gpuaddr,
		uint64_t size, unsigned int type);

/* Check whether the given GPU address range is already queued */
int kgsl_snapshot_have_object(struct kgsl_snapshot *snapshot,
		struct kgsl_process_private *process,
		uint64_t gpuaddr, uint64_t size);

struct adreno_ib_object_list;

/* Attach a parsed IB object list to the snapshot */
int kgsl_snapshot_add_ib_obj_list(struct kgsl_snapshot *snapshot,
		struct adreno_ib_object_list *ib_obj_list);

/* Emit one snapshot section: write the header for @id, then call @func
 * to fill in the section body */
void kgsl_snapshot_add_section(struct kgsl_device *device, u16 id,
		struct kgsl_snapshot *snapshot,
		size_t (*func)(struct kgsl_device *, u8 *, size_t, void *),
		void *priv);
999
/**
 * struct kgsl_pwr_limit - limit structure for each client
 * @node: Local list node for the limits list
 * @level: requested power level
 * @device: pointer to the device structure
 *
 * One instance per client that has requested a power-level limit;
 * instances are linked together via @node (list head lives in the
 * pwrctrl code - see kgsl_pwrctrl).
 */
struct kgsl_pwr_limit {
	struct list_head node;
	unsigned int level;
	struct kgsl_device *device;
};
1011
1012#endif /* __KGSL_DEVICE_H */