/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <soc/qcom/scm.h>

#include <linux/msm-bus-board.h>
#include <linux/msm-bus.h>

#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_sharedmem.h"
#include "kgsl_iommu.h"
#include "kgsl_trace.h"
#include "adreno_llc.h"

#include "adreno.h"
#include "adreno_iommu.h"
#include "adreno_compat.h"
#include "adreno_pm4types.h"
#include "adreno_trace.h"

#include "a3xx_reg.h"
#include "adreno_snapshot.h"

/* Include the master list of GPU cores that are supported */
#include "adreno-gpulist.h"
#include "adreno_dispatch.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "adreno."

static bool nopreempt;
module_param(nopreempt, bool, 0444);
MODULE_PARM_DESC(nopreempt, "Disable GPU preemption");

static bool swfdetect;
module_param(swfdetect, bool, 0444);
MODULE_PARM_DESC(swfdetect, "Enable soft fault detection");

#define DRIVER_VERSION_MAJOR 3
#define DRIVER_VERSION_MINOR 1

#define KGSL_LOG_LEVEL_DEFAULT 3

static void adreno_input_work(struct work_struct *work);
static unsigned int counter_delta(struct kgsl_device *device,
		unsigned int reg, unsigned int *counter);

static struct devfreq_msm_adreno_tz_data adreno_tz_data = {
	.bus = {
		.max = 350,
	},
	.device_id = KGSL_DEVICE_3D0,
};

static const struct kgsl_functable adreno_functable;

static struct adreno_device device_3d0 = {
	.dev = {
		KGSL_DEVICE_COMMON_INIT(device_3d0.dev),
		.pwrscale = KGSL_PWRSCALE_INIT(&adreno_tz_data),
		.name = DEVICE_3D0_NAME,
		.id = KGSL_DEVICE_3D0,
		.gmu = {
			.load_mode = TCM_BOOT,
		},
		.pwrctrl = {
			.irq_name = "kgsl_3d0_irq",
		},
		.iomemname = "kgsl_3d0_reg_memory",
		.shadermemname = "kgsl_3d0_shader_memory",
		.ftbl = &adreno_functable,
		.cmd_log = KGSL_LOG_LEVEL_DEFAULT,
		.ctxt_log = KGSL_LOG_LEVEL_DEFAULT,
		.drv_log = KGSL_LOG_LEVEL_DEFAULT,
		.mem_log = KGSL_LOG_LEVEL_DEFAULT,
		.pwr_log = KGSL_LOG_LEVEL_DEFAULT,
	},
	.fw[0] = {
		.fwvirt = NULL
	},
	.fw[1] = {
		.fwvirt = NULL
	},
	.gmem_size = SZ_256K,
	.ft_policy = KGSL_FT_DEFAULT_POLICY,
	.ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY,
	.long_ib_detect = 1,
	.input_work = __WORK_INITIALIZER(device_3d0.input_work,
		adreno_input_work),
	.pwrctrl_flag = BIT(ADRENO_SPTP_PC_CTRL) | BIT(ADRENO_PPD_CTRL) |
		BIT(ADRENO_LM_CTRL) | BIT(ADRENO_HWCG_CTRL) |
		BIT(ADRENO_THROTTLING_CTRL),
	.profile.enabled = false,
	.active_list = LIST_HEAD_INIT(device_3d0.active_list),
	.active_list_lock = __SPIN_LOCK_UNLOCKED(device_3d0.active_list_lock),
	.gpu_llc_slice_enable = true,
	.gpuhtw_llc_slice_enable = true,
};

/* Ptr to array for the current set of fault detect registers */
unsigned int *adreno_ft_regs;
/* Total number of fault detect registers */
unsigned int adreno_ft_regs_num;
/* Ptr to array for the current fault detect register values */
unsigned int *adreno_ft_regs_val;
/* Array of default fault detect registers */
static unsigned int adreno_ft_regs_default[] = {
	ADRENO_REG_RBBM_STATUS,
	ADRENO_REG_CP_RB_RPTR,
	ADRENO_REG_CP_IB1_BASE,
	ADRENO_REG_CP_IB1_BUFSZ,
	ADRENO_REG_CP_IB2_BASE,
	ADRENO_REG_CP_IB2_BUFSZ
};

/* Nice level for the higher priority GPU start thread */
int adreno_wake_nice = -7;

/* Number of milliseconds to stay active after a wake on touch */
unsigned int adreno_wake_timeout = 100;

/**
 * adreno_readreg64() - Read a 64 bit register by getting its offset from the
 * offset array defined in the gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @lo: lower 32 bit register enum that is to be read
 * @hi: higher 32 bit register enum that is to be read
 * @val: 64 bit register value read is placed here
 */
void adreno_readreg64(struct adreno_device *adreno_dev,
		enum adreno_regs lo, enum adreno_regs hi, uint64_t *val)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int val_lo = 0, val_hi = 0;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, lo))
		kgsl_regread(device, gpudev->reg_offsets->offsets[lo], &val_lo);
	if (adreno_checkreg_off(adreno_dev, hi))
		kgsl_regread(device, gpudev->reg_offsets->offsets[hi], &val_hi);

	*val = (val_lo | ((uint64_t)val_hi << 32));
}

/**
 * adreno_writereg64() - Write a 64 bit register by getting its offset from the
 * offset array defined in the gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @lo: lower 32 bit register enum that is to be written
 * @hi: higher 32 bit register enum that is to be written
 * @val: 64 bit value to write
 */
void adreno_writereg64(struct adreno_device *adreno_dev,
		enum adreno_regs lo, enum adreno_regs hi, uint64_t val)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, lo))
		kgsl_regwrite(KGSL_DEVICE(adreno_dev),
			gpudev->reg_offsets->offsets[lo], lower_32_bits(val));
	if (adreno_checkreg_off(adreno_dev, hi))
		kgsl_regwrite(KGSL_DEVICE(adreno_dev),
			gpudev->reg_offsets->offsets[hi], upper_32_bits(val));
}

/**
 * adreno_get_rptr() - Get the current ringbuffer read pointer
 * @rb: Pointer to the ringbuffer to query
 *
 * Get the latest rptr
 */
unsigned int adreno_get_rptr(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
	unsigned int rptr = 0;

	if (adreno_is_a3xx(adreno_dev))
		adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
			&rptr);
	else {
		struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

		kgsl_sharedmem_readl(&device->scratch, &rptr,
			SCRATCH_RPTR_OFFSET(rb->id));
	}

	return rptr;
}

/**
 * adreno_of_read_property() - Read a u32 device tree property
 * @node: Device node to read from
 * @prop: Name of the property to read
 * @ptr: Destination for the value that was read
 *
 * Read a u32 property and log an error if it is missing.
 */
static inline int adreno_of_read_property(struct device_node *node,
	const char *prop, unsigned int *ptr)
{
	int ret = of_property_read_u32(node, prop, ptr);

	if (ret)
		KGSL_CORE_ERR("Unable to read '%s'\n", prop);
	return ret;
}

static void __iomem *efuse_base;
static size_t efuse_len;

int adreno_efuse_map(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct resource *res;

	if (efuse_base != NULL)
		return 0;

	res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
		"qfprom_memory");

	if (res == NULL)
		return -ENODEV;

	efuse_base = ioremap(res->start, resource_size(res));
	if (efuse_base == NULL)
		return -ENODEV;

	efuse_len = resource_size(res);
	return 0;
}

void adreno_efuse_unmap(struct adreno_device *adreno_dev)
{
	if (efuse_base != NULL) {
		iounmap(efuse_base);
		efuse_base = NULL;
		efuse_len = 0;
	}
}

int adreno_efuse_read_u32(struct adreno_device *adreno_dev, unsigned int offset,
		unsigned int *val)
{
	if (efuse_base == NULL)
		return -ENODEV;

	if (offset >= efuse_len)
		return -ERANGE;

	if (val != NULL) {
		*val = readl_relaxed(efuse_base + offset);
		/* Make sure memory is updated before returning */
		rmb();
	}

	return 0;
}

static int _get_counter(struct adreno_device *adreno_dev,
		int group, int countable, unsigned int *lo,
		unsigned int *hi)
{
	int ret = 0;

	if (*lo == 0) {
		ret = adreno_perfcounter_get(adreno_dev, group, countable,
			lo, hi, PERFCOUNTER_FLAG_KERNEL);

		if (ret) {
			KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
				"Unable to allocate fault detect performance counter %d/%d\n",
				group, countable);
			KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
				"GPU fault detect will be less reliable\n");
		}
	}

	return ret;
}

static inline void _put_counter(struct adreno_device *adreno_dev,
		int group, int countable, unsigned int *lo,
		unsigned int *hi)
{
	if (*lo != 0)
		adreno_perfcounter_put(adreno_dev, group, countable,
			PERFCOUNTER_FLAG_KERNEL);

	*lo = 0;
	*hi = 0;
}

/**
 * adreno_fault_detect_start() - Allocate performance counters
 * used for fast fault detection
 * @adreno_dev: Pointer to an adreno_device structure
 *
 * Allocate the series of performance counters that should be periodically
 * checked to verify that the GPU is still moving
 */
void adreno_fault_detect_start(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int i, j = ARRAY_SIZE(adreno_ft_regs_default);

	if (!test_bit(ADRENO_DEVICE_SOFT_FAULT_DETECT, &adreno_dev->priv))
		return;

	if (adreno_dev->fast_hang_detect == 1)
		return;

	for (i = 0; i < gpudev->ft_perf_counters_count; i++) {
		_get_counter(adreno_dev, gpudev->ft_perf_counters[i].counter,
			gpudev->ft_perf_counters[i].countable,
			&adreno_ft_regs[j + (i * 2)],
			&adreno_ft_regs[j + ((i * 2) + 1)]);
	}

	adreno_dev->fast_hang_detect = 1;
}

/**
 * adreno_fault_detect_stop() - Release performance counters
 * used for fast fault detection
 * @adreno_dev: Pointer to an adreno_device structure
 *
 * Release the counters allocated in adreno_fault_detect_start
 */
void adreno_fault_detect_stop(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int i, j = ARRAY_SIZE(adreno_ft_regs_default);

	if (!test_bit(ADRENO_DEVICE_SOFT_FAULT_DETECT, &adreno_dev->priv))
		return;

	if (!adreno_dev->fast_hang_detect)
		return;

	for (i = 0; i < gpudev->ft_perf_counters_count; i++) {
		_put_counter(adreno_dev, gpudev->ft_perf_counters[i].counter,
			gpudev->ft_perf_counters[i].countable,
			&adreno_ft_regs[j + (i * 2)],
			&adreno_ft_regs[j + ((i * 2) + 1)]);
	}

	adreno_dev->fast_hang_detect = 0;
}

/*
 * A workqueue callback responsible for actually turning on the GPU after a
 * touch event. kgsl_pwrctrl_change_state(ACTIVE) is used without any
 * active_count protection to avoid the need to maintain state. Either
 * somebody will start using the GPU or the idle timer will fire and put the
 * GPU back into slumber.
 */
static void adreno_input_work(struct work_struct *work)
{
	struct adreno_device *adreno_dev = container_of(work,
			struct adreno_device, input_work);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	mutex_lock(&device->mutex);

	device->flags |= KGSL_FLAG_WAKE_ON_TOUCH;

	/*
	 * Don't schedule adreno_start in a high priority workqueue, we are
	 * already in a workqueue which should be sufficient
	 */
	kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);

	/*
	 * When waking up from a touch event we want to stay active long enough
	 * for the user to send a draw command. The default idle timer timeout
	 * is shorter than we want so go ahead and push the idle timer out
	 * further for this special case
	 */
	mod_timer(&device->idle_timer,
		jiffies + msecs_to_jiffies(adreno_wake_timeout));
	mutex_unlock(&device->mutex);
}

/*
 * Process input events and schedule work if needed. At this point we are only
 * interested in grokking EV_ABS touchscreen events
 */
static void adreno_input_event(struct input_handle *handle, unsigned int type,
		unsigned int code, int value)
{
	struct kgsl_device *device = handle->handler->private;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/* Only consider EV_ABS (touch) events */
	if (type != EV_ABS)
		return;

	/*
	 * Don't do anything if nothing has been rendered since we were last
	 * here
	 */
	if (device->flags & KGSL_FLAG_WAKE_ON_TOUCH)
		return;

	/*
	 * If the device is in nap, kick the idle timer to make sure that we
	 * don't go into slumber before the first render. If the device is
	 * already in slumber schedule the wake.
	 */
	if (device->state == KGSL_STATE_NAP) {
		/*
		 * Set the wake on touch bit to keep from coming back here and
		 * keeping the device in nap without rendering
		 */
		device->flags |= KGSL_FLAG_WAKE_ON_TOUCH;

		mod_timer(&device->idle_timer,
			jiffies + device->pwrctrl.interval_timeout);
	} else if (device->state == KGSL_STATE_SLUMBER) {
		schedule_work(&adreno_dev->input_work);
	}
}

#ifdef CONFIG_INPUT
static int adreno_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int ret;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = handler->name;

	ret = input_register_handle(handle);
	if (ret) {
		kfree(handle);
		return ret;
	}

	ret = input_open_device(handle);
	if (ret) {
		input_unregister_handle(handle);
		kfree(handle);
	}

	return ret;
}

static void adreno_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
#else
static int adreno_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	return 0;
}
static void adreno_input_disconnect(struct input_handle *handle) {}
#endif

/*
 * We are only interested in EV_ABS events so only register handlers for those
 * input devices that have EV_ABS events
 */
static const struct input_device_id adreno_input_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		/* assumption: MT_.._X & MT_.._Y are in the same long */
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
				BIT_MASK(ABS_MT_POSITION_X) |
				BIT_MASK(ABS_MT_POSITION_Y) },
	},
	{ },
};

static struct input_handler adreno_input_handler = {
	.event = adreno_input_event,
	.connect = adreno_input_connect,
	.disconnect = adreno_input_disconnect,
	.name = "kgsl",
	.id_table = adreno_input_ids,
};

/*
 * _soft_reset() - Soft reset GPU
 * @adreno_dev: Pointer to adreno device
 *
 * Soft reset the GPU by doing an AHB write of value 1 to RBBM_SW_RESET
 * register. This is used when we want to reset the GPU without
 * turning off GFX power rail. The reset when asserted resets
 * all the HW logic, restores GPU registers to default state and
 * flushes out pending VBIF transactions.
 */
static int _soft_reset(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int reg;

	/*
	 * On a530 v1 RBBM cannot be reset in soft reset.
	 * Reset all blocks except RBBM for a530v1.
	 */
	if (adreno_is_a530v1(adreno_dev)) {
		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD,
			0xFFDFFC0);
		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
			0x1FFFFFFF);
	} else {
		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 1);
		/*
		 * Do a dummy read to get a brief read cycle delay for the
		 * reset to take effect
		 */
		adreno_readreg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, &reg);
		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 0);
	}

	/* The SP/TP regulator gets turned off after a soft reset */
	if (gpudev->regulator_enable)
		gpudev->regulator_enable(adreno_dev);

	return 0;
}

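/*
 * adreno_irqctrl() - Unmask or mask the GPU hardware interrupts
 * @adreno_dev: Pointer to the adreno device
 * @state: Nonzero to program the target's full interrupt mask, 0 to mask
 * all interrupts
 */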
void adreno_irqctrl(struct adreno_device *adreno_dev, int state)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int mask = state ? gpudev->irq->mask : 0;

	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_0_MASK, mask);
}

/*
 * adreno_hang_int_callback() - Isr for fatal interrupts that hang GPU
 * @adreno_dev: Pointer to device
 * @bit: Interrupt bit
 */
void adreno_hang_int_callback(struct adreno_device *adreno_dev, int bit)
{
	KGSL_DRV_CRIT_RATELIMIT(KGSL_DEVICE(adreno_dev),
			"MISC: GPU hang detected\n");
	adreno_irqctrl(adreno_dev, 0);

	/* Trigger a fault in the dispatcher - this will effect a restart */
	adreno_set_gpu_fault(adreno_dev, ADRENO_HARD_FAULT);
	adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
}

/*
 * adreno_cp_callback() - CP interrupt handler
 * @adreno_dev: Adreno device pointer
 * @bit: Interrupt bit
 *
 * Handle the cp interrupt generated by GPU.
 */
void adreno_cp_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	adreno_dispatcher_schedule(device);
}

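/*
 * adreno_irq_handler() - Top level interrupt handler for the GPU
 * @device: KGSL device that raised the interrupt
 *
 * Read RBBM_INT_0_STATUS, clear the pending bits, and dispatch each set bit
 * to the handler registered in the target's IRQ function table. The AHB
 * error bit is cleared only after its handler has run, and a GMU keepalive
 * vote (on targets that have one) keeps the GPU powered for the duration of
 * the ISR.
 */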
static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct adreno_irq *irq_params = gpudev->irq;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status = 0, tmp, int_bit;
	int i;

	atomic_inc(&adreno_dev->pending_irq_refcnt);
	/* Ensure this increment is done before the IRQ status is updated */
	smp_mb__after_atomic();

	/*
	 * On A6xx, the GPU can power down once the INT_0_STATUS is read
	 * below. But there still might be some register reads required
	 * so force the GMU/GPU into KEEPALIVE mode until done with the ISR.
	 */
	if (gpudev->gpu_keepalive)
		gpudev->gpu_keepalive(adreno_dev, true);

	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);

	/*
	 * Clear all the interrupt bits but ADRENO_INT_RBBM_AHB_ERROR. Because
	 * even if we clear it here, it will stay high until it is cleared
	 * in its respective handler. Otherwise, the interrupt handler will
	 * fire again.
	 */
	int_bit = ADRENO_INT_BIT(adreno_dev, ADRENO_INT_RBBM_AHB_ERROR);
	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
			status & ~int_bit);

	/* Loop through all set interrupts and call respective handlers */
	for (tmp = status; tmp != 0;) {
		i = fls(tmp) - 1;

		if (irq_params->funcs[i].func != NULL) {
			if (irq_params->mask & BIT(i))
				irq_params->funcs[i].func(adreno_dev, i);
		} else
			KGSL_DRV_CRIT_RATELIMIT(device,
					"Unhandled interrupt bit %x\n", i);

		ret = IRQ_HANDLED;

		tmp &= ~BIT(i);
	}

	gpudev->irq_trace(adreno_dev, status);

	/*
	 * Clear ADRENO_INT_RBBM_AHB_ERROR bit after this interrupt has been
	 * cleared in its respective handler
	 */
	if (status & int_bit)
		adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
				int_bit);

	/* Turn off the KEEPALIVE vote from earlier unless hard fault set */
	if (gpudev->gpu_keepalive) {
		/* If hard fault, then let snapshot turn off the keepalive */
		if (!(adreno_gpu_fault(adreno_dev) & ADRENO_HARD_FAULT))
			gpudev->gpu_keepalive(adreno_dev, false);
	}

	/* Make sure the regwrites are done before the decrement */
	smp_mb__before_atomic();
	atomic_dec(&adreno_dev->pending_irq_refcnt);
	/* Ensure other CPUs see the decrement */
	smp_mb__after_atomic();

	return ret;
}

static inline bool _rev_match(unsigned int id, unsigned int entry)
{
	return (entry == ANY_ID || entry == id);
}

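/*
 * _get_gpu_core() - Look up the GPU core description for a chip ID
 * @chipid: Chip ID read from the hardware or device tree
 *
 * Walk adreno_gpulist and return the first entry whose core, major, minor
 * and patch fields match the chip ID (ANY_ID entries match any value), or
 * NULL if the GPU is not in the list.
 */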
static inline const struct adreno_gpu_core *_get_gpu_core(unsigned int chipid)
{
	unsigned int core = ADRENO_CHIPID_CORE(chipid);
	unsigned int major = ADRENO_CHIPID_MAJOR(chipid);
	unsigned int minor = ADRENO_CHIPID_MINOR(chipid);
	unsigned int patchid = ADRENO_CHIPID_PATCH(chipid);
	int i;

	for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) {
		if (core == adreno_gpulist[i].core &&
		    _rev_match(major, adreno_gpulist[i].major) &&
		    _rev_match(minor, adreno_gpulist[i].minor) &&
		    _rev_match(patchid, adreno_gpulist[i].patchid))
			return &adreno_gpulist[i];
	}

	return NULL;
}

static void
adreno_identify_gpu(struct adreno_device *adreno_dev)
{
	const struct adreno_reg_offsets *reg_offsets;
	struct adreno_gpudev *gpudev;
	int i;

	if (kgsl_property_read_u32(KGSL_DEVICE(adreno_dev), "qcom,chipid",
		&adreno_dev->chipid))
		KGSL_DRV_FATAL(KGSL_DEVICE(adreno_dev),
			"No GPU chip ID was specified\n");

	adreno_dev->gpucore = _get_gpu_core(adreno_dev->chipid);

	if (adreno_dev->gpucore == NULL)
		KGSL_DRV_FATAL(KGSL_DEVICE(adreno_dev),
			"Unknown GPU chip ID %8.8X\n", adreno_dev->chipid);

	/*
	 * The gmem size might be dynamic when ocmem is involved so copy it out
	 * of the gpu device
	 */
	adreno_dev->gmem_size = adreno_dev->gpucore->gmem_size;

	/*
	 * Initialize uninitialized gpu registers; only needs to be done once.
	 * Set all offsets that are not initialized to ADRENO_REG_UNUSED
	 */
	gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	reg_offsets = gpudev->reg_offsets;

	for (i = 0; i < ADRENO_REG_REGISTER_MAX; i++) {
		if (reg_offsets->offset_0 != i && !reg_offsets->offsets[i])
			reg_offsets->offsets[i] = ADRENO_REG_UNUSED;
	}

	/* Do target specific identification */
	if (gpudev->platform_setup != NULL)
		gpudev->platform_setup(adreno_dev);
}

static const struct platform_device_id adreno_id_table[] = {
	{ DEVICE_3D0_NAME, (unsigned long) &device_3d0, },
	{},
};

MODULE_DEVICE_TABLE(platform, adreno_id_table);

static const struct of_device_id adreno_match_table[] = {
	{ .compatible = "qcom,kgsl-3d0", .data = &device_3d0 },
	{}
};

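/*
 * adreno_of_parse_pwrlevels() - Parse GPU power levels from the device tree
 * @adreno_dev: Pointer to the adreno device
 * @node: Device tree node that holds the power level children
 *
 * Each child node supplies the GPU and bus frequencies for one power level;
 * qcom,bus-min and qcom,bus-max default to the bus frequency when absent.
 */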
static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
		struct device_node *node)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct device_node *child;

	pwr->num_pwrlevels = 0;

	for_each_child_of_node(node, child) {
		unsigned int index;
		struct kgsl_pwrlevel *level;

		if (adreno_of_read_property(child, "reg", &index))
			return -EINVAL;

		if (index >= KGSL_MAX_PWRLEVELS) {
			KGSL_CORE_ERR("Pwrlevel index %d is out of range\n",
				index);
			continue;
		}

		if (index >= pwr->num_pwrlevels)
			pwr->num_pwrlevels = index + 1;

		level = &pwr->pwrlevels[index];

		if (adreno_of_read_property(child, "qcom,gpu-freq",
			&level->gpu_freq))
			return -EINVAL;

		if (adreno_of_read_property(child, "qcom,bus-freq",
			&level->bus_freq))
			return -EINVAL;

		if (of_property_read_u32(child, "qcom,bus-min",
			&level->bus_min))
			level->bus_min = level->bus_freq;

		if (of_property_read_u32(child, "qcom,bus-max",
			&level->bus_max))
			level->bus_max = level->bus_freq;
	}

	return 0;
}

static void adreno_of_get_initial_pwrlevel(struct adreno_device *adreno_dev,
		struct device_node *node)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int init_level = 1;

	of_property_read_u32(node, "qcom,initial-pwrlevel", &init_level);

	if (init_level < 0 || init_level > pwr->num_pwrlevels)
		init_level = 1;

	pwr->active_pwrlevel = init_level;
	pwr->default_pwrlevel = init_level;
}

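/*
 * adreno_of_get_legacy_pwrlevels() - Parse the flat qcom,gpu-pwrlevels node
 * @adreno_dev: Pointer to the adreno device
 * @parent: Parent device tree node to search
 *
 * Fallback for older device trees that carry a single power level table
 * instead of per-speed-bin qcom,gpu-pwrlevel-bins entries.
 */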
static int adreno_of_get_legacy_pwrlevels(struct adreno_device *adreno_dev,
		struct device_node *parent)
{
	struct device_node *node;
	int ret;

	node = of_find_node_by_name(parent, "qcom,gpu-pwrlevels");

	if (node == NULL) {
		KGSL_CORE_ERR("Unable to find 'qcom,gpu-pwrlevels'\n");
		return -EINVAL;
	}

	ret = adreno_of_parse_pwrlevels(adreno_dev, node);
	if (ret == 0)
		adreno_of_get_initial_pwrlevel(adreno_dev, parent);
	return ret;
}

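/*
 * adreno_of_get_pwrlevels() - Select and parse the power level table
 * @adreno_dev: Pointer to the adreno device
 * @parent: Parent device tree node
 *
 * Prefer the qcom,gpu-pwrlevel-bins format, picking the child whose
 * qcom,speed-bin matches this part; fall back to the legacy flat table
 * when no bins node exists.
 */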
static int adreno_of_get_pwrlevels(struct adreno_device *adreno_dev,
		struct device_node *parent)
{
	struct device_node *node, *child;

	node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");
	if (node == NULL)
		return adreno_of_get_legacy_pwrlevels(adreno_dev, parent);

	for_each_child_of_node(node, child) {
		unsigned int bin;

		if (of_property_read_u32(child, "qcom,speed-bin", &bin))
			continue;

		if (bin == adreno_dev->speed_bin) {
			int ret;

			ret = adreno_of_parse_pwrlevels(adreno_dev, child);
			if (ret == 0)
				adreno_of_get_initial_pwrlevel(adreno_dev,
					child);
			return ret;
		}
	}

	return -ENODEV;
}

static inline struct adreno_device *adreno_get_dev(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(adreno_match_table, &pdev->dev);

	return of_id ? (struct adreno_device *) of_id->data : NULL;
}

static struct {
	unsigned int quirk;
	const char *prop;
} adreno_quirks[] = {
	{ ADRENO_QUIRK_TWO_PASS_USE_WFI, "qcom,gpu-quirk-two-pass-use-wfi" },
	{ ADRENO_QUIRK_IOMMU_SYNC, "qcom,gpu-quirk-iommu-sync" },
	{ ADRENO_QUIRK_CRITICAL_PACKETS, "qcom,gpu-quirk-critical-packets" },
	{ ADRENO_QUIRK_FAULT_DETECT_MASK, "qcom,gpu-quirk-fault-detect-mask" },
	{ ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING,
			"qcom,gpu-quirk-dp2clockgating-disable" },
	{ ADRENO_QUIRK_DISABLE_LMLOADKILL,
			"qcom,gpu-quirk-lmloadkill-disable" },
	{ ADRENO_QUIRK_HFI_USE_REG, "qcom,gpu-quirk-hfi-use-reg" },
};

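/*
 * adreno_of_get_power() - Parse power related device tree properties
 * @adreno_dev: Pointer to the adreno device
 * @pdev: Platform device for the GPU
 *
 * Read the label, quirks, register region, power levels, PM QoS latencies
 * and idle timeout from the device tree and store them in the kgsl device.
 */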
static int adreno_of_get_power(struct adreno_device *adreno_dev,
		struct platform_device *pdev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct device_node *node = pdev->dev.of_node;
	struct resource *res;
	int i;
	unsigned int timeout;

	if (of_property_read_string(node, "label", &pdev->name)) {
		KGSL_CORE_ERR("Unable to read 'label'\n");
		return -EINVAL;
	}

	if (adreno_of_read_property(node, "qcom,id", &pdev->id))
		return -EINVAL;

	/* Set up quirks and other boolean options */
	for (i = 0; i < ARRAY_SIZE(adreno_quirks); i++) {
		if (of_property_read_bool(node, adreno_quirks[i].prop))
			adreno_dev->quirks |= adreno_quirks[i].quirk;
	}

	/* Get starting physical address of device registers */
	res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
		device->iomemname);
	if (res == NULL) {
		KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
		return -EINVAL;
	}
	if (res->start == 0 || resource_size(res) == 0) {
		KGSL_DRV_ERR(device, "dev %d invalid register region\n",
			device->id);
		return -EINVAL;
	}

	device->reg_phys = res->start;
	device->reg_len = resource_size(res);

	if (adreno_of_get_pwrlevels(adreno_dev, node))
		return -EINVAL;

	/* get pm-qos-active-latency, set it to default if not found */
	if (of_property_read_u32(node, "qcom,pm-qos-active-latency",
		&device->pwrctrl.pm_qos_active_latency))
		device->pwrctrl.pm_qos_active_latency = 501;

	/* get l2pc-cpu-mask-latency, set it to default if not found */
	if (of_property_read_u32(node, "qcom,l2pc-cpu-mask-latency",
		&device->pwrctrl.pm_qos_cpu_mask_latency))
		device->pwrctrl.pm_qos_cpu_mask_latency = 501;

	/* get pm-qos-wakeup-latency, set it to default if not found */
	if (of_property_read_u32(node, "qcom,pm-qos-wakeup-latency",
		&device->pwrctrl.pm_qos_wakeup_latency))
		device->pwrctrl.pm_qos_wakeup_latency = 101;

	if (of_property_read_u32(node, "qcom,idle-timeout", &timeout))
		timeout = 80;

	device->pwrctrl.interval_timeout = msecs_to_jiffies(timeout);

	device->pwrctrl.bus_control = of_property_read_bool(node,
		"qcom,bus-control");

	device->pwrctrl.input_disable = of_property_read_bool(node,
		"qcom,disable-wake-on-touch");

	return 0;
}

#ifdef CONFIG_QCOM_OCMEM
static int
adreno_ocmem_malloc(struct adreno_device *adreno_dev)
{
	if (!ADRENO_FEATURE(adreno_dev, ADRENO_USES_OCMEM))
		return 0;

	if (adreno_dev->ocmem_hdl == NULL) {
		adreno_dev->ocmem_hdl =
			ocmem_allocate(OCMEM_GRAPHICS, adreno_dev->gmem_size);
		if (IS_ERR_OR_NULL(adreno_dev->ocmem_hdl)) {
			adreno_dev->ocmem_hdl = NULL;
			return -ENOMEM;
		}

		adreno_dev->gmem_size = adreno_dev->ocmem_hdl->len;
		adreno_dev->gmem_base = adreno_dev->ocmem_hdl->addr;
	}

	return 0;
}

static void
adreno_ocmem_free(struct adreno_device *adreno_dev)
{
	if (adreno_dev->ocmem_hdl != NULL) {
		ocmem_free(OCMEM_GRAPHICS, adreno_dev->ocmem_hdl);
		adreno_dev->ocmem_hdl = NULL;
	}
}
#else
static int
adreno_ocmem_malloc(struct adreno_device *adreno_dev)
{
	return 0;
}

static void
adreno_ocmem_free(struct adreno_device *adreno_dev)
{
}
#endif

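/*
 * adreno_probe() - Platform driver probe for the Adreno GPU
 * @pdev: Platform device that matched qcom,kgsl-3d0
 *
 * Identify the GPU core, parse the power configuration, probe the GMU if
 * one is present, and bring up the KGSL device along with the ringbuffers,
 * dispatcher, debugfs/sysfs nodes, pwrscale, coresight, LLC slices and the
 * optional wake-on-touch input handler.
 */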
static int adreno_probe(struct platform_device *pdev)
{
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	int status;

	adreno_dev = adreno_get_dev(pdev);

	if (adreno_dev == NULL) {
		pr_err("adreno: qcom,kgsl-3d0 does not exist in the device tree");
		return -ENODEV;
	}

	device = KGSL_DEVICE(adreno_dev);
	device->pdev = pdev;

	/* Get the chip ID from the DT and set up target specific parameters */
	adreno_identify_gpu(adreno_dev);

	status = adreno_of_get_power(adreno_dev, pdev);
	if (status) {
		device->pdev = NULL;
		return status;
	}

	/*
	 * Probe/init the GMU after the initial GPU power probe; a later part
	 * of the GPU power probe in platform_probe needs the GMU initialized.
	 */
	status = gmu_probe(device);
	if (status != 0 && status != -ENXIO) {
		device->pdev = NULL;
		return status;
	}

	/*
	 * The SMMU APIs use unsigned long for virtual addresses which means
	 * that we cannot use 64 bit virtual addresses on a 32 bit kernel even
	 * though the hardware and the rest of the KGSL driver supports it.
	 */
	if (adreno_support_64bit(adreno_dev))
		device->mmu.features |= KGSL_MMU_64BIT;

	status = kgsl_device_platform_probe(device);
	if (status) {
		device->pdev = NULL;
		return status;
	}

	/*
	 * qcom,iommu-secure-id is used to identify MMUs that can handle secure
	 * content but that is only part of the story - the GPU also has to be
	 * able to handle secure content. Unfortunately in a classic catch-22
	 * we cannot identify the GPU until after the DT is parsed. tl;dr -
	 * check the GPU capabilities here and modify mmu->secured accordingly
	 */
	if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
		device->mmu.secured = false;

	status = adreno_ringbuffer_probe(adreno_dev, nopreempt);
	if (status)
		goto out;

	status = adreno_dispatcher_init(adreno_dev);
	if (status)
		goto out;

	adreno_debugfs_init(adreno_dev);
	adreno_profile_init(adreno_dev);

	adreno_sysfs_init(adreno_dev);

	kgsl_pwrscale_init(&pdev->dev, CONFIG_QCOM_ADRENO_DEFAULT_GOVERNOR);

	/* Initialize coresight for the target */
	adreno_coresight_init(adreno_dev);

	/* Get the system cache slice descriptor for GPU */
	adreno_dev->gpu_llc_slice = adreno_llc_getd(&pdev->dev, "gpu");
	if (IS_ERR(adreno_dev->gpu_llc_slice)) {
		KGSL_DRV_WARN(device,
			"Failed to get GPU LLC slice descriptor (%ld)\n",
			PTR_ERR(adreno_dev->gpu_llc_slice));
		adreno_dev->gpu_llc_slice = NULL;
	}

	/* Get the system cache slice descriptor for GPU pagetables */
	adreno_dev->gpuhtw_llc_slice = adreno_llc_getd(&pdev->dev, "gpuhtw");
	if (IS_ERR(adreno_dev->gpuhtw_llc_slice)) {
		KGSL_DRV_WARN(device,
			"Failed to get gpuhtw LLC slice descriptor (%ld)\n",
			PTR_ERR(adreno_dev->gpuhtw_llc_slice));
		adreno_dev->gpuhtw_llc_slice = NULL;
	}

#ifdef CONFIG_INPUT
	if (!device->pwrctrl.input_disable) {
		adreno_input_handler.private = device;
		/*
		 * It isn't fatal if we cannot register the input handler. Sad,
		 * perhaps, but not fatal
		 */
		if (input_register_handler(&adreno_input_handler)) {
			adreno_input_handler.private = NULL;
			KGSL_DRV_ERR(device,
				"Unable to register the input handler\n");
		}
	}
#endif
out:
	if (status) {
		adreno_ringbuffer_close(adreno_dev);
		kgsl_device_platform_remove(device);
		device->pdev = NULL;
	}

	return status;
}

static void _adreno_free_memories(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_firmware *pfp_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PFP);
	struct adreno_firmware *pm4_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PM4);

	if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv))
		kgsl_free_global(device, &adreno_dev->profile_buffer);

	/* Free local copies of firmware and other command streams */
	kfree(pfp_fw->fwvirt);
	pfp_fw->fwvirt = NULL;

	kfree(pm4_fw->fwvirt);
	pm4_fw->fwvirt = NULL;

	kfree(adreno_dev->gpmu_cmds);
	adreno_dev->gpmu_cmds = NULL;

	kgsl_free_global(device, &pfp_fw->memdesc);
	kgsl_free_global(device, &pm4_fw->memdesc);
}

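/*
 * adreno_remove() - Platform driver remove callback
 * @pdev: Platform device being removed
 *
 * Tear down everything adreno_probe() set up: target specific state, the
 * input handler, sysfs/coresight/profile nodes, LLC slices, pwrscale, the
 * dispatcher and ringbuffers, fault detection state, and finally the KGSL
 * device and GMU.
 */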
static int adreno_remove(struct platform_device *pdev)
{
	struct adreno_device *adreno_dev = adreno_get_dev(pdev);
	struct adreno_gpudev *gpudev;
	struct kgsl_device *device;

	if (adreno_dev == NULL)
		return 0;

	device = KGSL_DEVICE(adreno_dev);
	gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (gpudev->remove != NULL)
		gpudev->remove(adreno_dev);

	/* The memory is fading */
	_adreno_free_memories(adreno_dev);

#ifdef CONFIG_INPUT
	if (adreno_input_handler.private)
		input_unregister_handler(&adreno_input_handler);
#endif
	adreno_sysfs_close(adreno_dev);

	adreno_coresight_remove(adreno_dev);
	adreno_profile_close(adreno_dev);

	/* Release the system cache slice descriptor */
	if (adreno_dev->gpu_llc_slice)
		adreno_llc_putd(adreno_dev->gpu_llc_slice);
	if (adreno_dev->gpuhtw_llc_slice)
		adreno_llc_putd(adreno_dev->gpuhtw_llc_slice);

	kgsl_pwrscale_close(device);

	adreno_dispatcher_close(adreno_dev);
	adreno_ringbuffer_close(adreno_dev);

	adreno_fault_detect_stop(adreno_dev);

	kfree(adreno_ft_regs);
	adreno_ft_regs = NULL;

	kfree(adreno_ft_regs_val);
	adreno_ft_regs_val = NULL;

	if (efuse_base != NULL)
		iounmap(efuse_base);

	adreno_perfcounter_close(adreno_dev);
	kgsl_device_platform_remove(device);

	gmu_remove(device);

	if (test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv)) {
		kgsl_free_global(device, &adreno_dev->pwron_fixup);
		clear_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv);
	}
	clear_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);

	return 0;
}

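/*
 * adreno_fault_detect_init() - Set up soft fault detection state
 * @adreno_dev: Pointer to the adreno device
 *
 * Allocate the register and value arrays used to sample the fault detect
 * registers, seed them with the default register list, and start fault
 * detection. Only runs when soft fault detection is enabled by module
 * parameter or GPU feature flag.
 */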
static void adreno_fault_detect_init(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int i;

	if (!(swfdetect ||
		ADRENO_FEATURE(adreno_dev, ADRENO_SOFT_FAULT_DETECT)))
		return;

	/* Disable the fast hang detect bit until we know it's a go */
	adreno_dev->fast_hang_detect = 0;

	adreno_ft_regs_num = (ARRAY_SIZE(adreno_ft_regs_default) +
		gpudev->ft_perf_counters_count*2);

	adreno_ft_regs = kcalloc(adreno_ft_regs_num, sizeof(unsigned int),
		GFP_KERNEL);
	adreno_ft_regs_val = kcalloc(adreno_ft_regs_num, sizeof(unsigned int),
		GFP_KERNEL);

	if (adreno_ft_regs == NULL || adreno_ft_regs_val == NULL) {
		kfree(adreno_ft_regs);
		kfree(adreno_ft_regs_val);

		adreno_ft_regs = NULL;
		adreno_ft_regs_val = NULL;

		return;
	}

	for (i = 0; i < ARRAY_SIZE(adreno_ft_regs_default); i++)
		adreno_ft_regs[i] = adreno_getreg(adreno_dev,
			adreno_ft_regs_default[i]);

	set_bit(ADRENO_DEVICE_SOFT_FAULT_DETECT, &adreno_dev->priv);

	adreno_fault_detect_start(adreno_dev);
}

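/*
 * adreno_init() - One time initialization after first power up
 * @device: Pointer to the KGSL device
 *
 * Read the microcode, set up the IOMMU, performance counters and fault
 * detection, map shader memory, allocate the drawobj profiling buffer and
 * initialize preemption. Runs only once until the device is shut down.
 */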
static int adreno_init(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int ret;

	ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
	if (ret)
		return ret;

	/*
	 * Initialization only needs to be done once until the device is
	 * shut down
	 */
	if (test_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv))
		return 0;

	/*
	 * Either the microcode read failed because the usermodehelper isn't
	 * available or the microcode was corrupted. Fail the init and force
	 * the user to try the open() again
	 */
	ret = gpudev->microcode_read(adreno_dev);
	if (ret)
		return ret;

	/* Put the GPU in a responsive state */
	if (ADRENO_GPUREV(adreno_dev) < 600) {
		/* No need for newer generation architectures */
		ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
		if (ret)
			return ret;
	}

	ret = adreno_iommu_init(adreno_dev);
	if (ret)
		return ret;

	adreno_perfcounter_init(adreno_dev);
	adreno_fault_detect_init(adreno_dev);

	/* Power down the device */
	if (ADRENO_GPUREV(adreno_dev) < 600)
		kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);

	if (gpudev->init != NULL)
		gpudev->init(adreno_dev);

	set_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);

	/* Use shader offset and length defined in gpudev */
	if (adreno_dev->gpucore->shader_offset &&
		adreno_dev->gpucore->shader_size) {

		if (device->shader_mem_phys || device->shader_mem_virt)
			KGSL_DRV_ERR(device,
				"Shader memory already specified in device tree\n");
		else {
			device->shader_mem_phys = device->reg_phys +
				adreno_dev->gpucore->shader_offset;
			device->shader_mem_virt = device->reg_virt +
				adreno_dev->gpucore->shader_offset;
			device->shader_mem_len =
				adreno_dev->gpucore->shader_size;
		}
	}

	/*
	 * Allocate a small chunk of memory for precise drawobj profiling for
	 * those targets that have the always on timer
	 */
	if (!adreno_is_a3xx(adreno_dev)) {
		int r = kgsl_allocate_global(device,
			&adreno_dev->profile_buffer, PAGE_SIZE,
			0, 0, "alwayson");

		adreno_dev->profile_index = 0;

		if (r == 0) {
			set_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE,
				&adreno_dev->priv);
			kgsl_sharedmem_set(device,
				&adreno_dev->profile_buffer, 0, 0,
				PAGE_SIZE);
		}
	}

	if (!nopreempt) {
		int r = 0;

		if (gpudev->preemption_init)
			r = gpudev->preemption_init(adreno_dev);

		if (r == 0)
			set_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
		else
			WARN(1, "adreno: GPU preemption is disabled\n");
	}

	return 0;
}

static bool regulators_left_on(struct kgsl_device *device)
{
	int i;

	if (kgsl_gmu_isenabled(device))
		return false;

	for (i = 0; i < KGSL_MAX_REGULATORS; i++) {
		struct kgsl_regulator *regulator =
			&device->pwrctrl.regulators[i];

		if (IS_ERR_OR_NULL(regulator->reg))
			break;

		if (regulator_is_enabled(regulator->reg))
			return true;
	}

	return false;
}

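/*
 * _set_secvid() - Program the SECVID registers for content protection
 * @device: Pointer to the KGSL device
 *
 * When the MMU supports secure mode, program the trusted base and size of
 * the secure carveout so secure buffers are restricted to that range.
 */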
static void _set_secvid(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/* Program GPU content protection init values */
	if (device->mmu.secured) {
		if (adreno_is_a4xx(adreno_dev))
			adreno_writereg(adreno_dev,
				ADRENO_REG_RBBM_SECVID_TRUST_CONFIG, 0x2);
		adreno_writereg(adreno_dev,
			ADRENO_REG_RBBM_SECVID_TSB_CONTROL, 0x0);

		adreno_writereg64(adreno_dev,
			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
			KGSL_IOMMU_SECURE_BASE);
		adreno_writereg(adreno_dev,
			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
			KGSL_IOMMU_SECURE_SIZE);
	}
}

static int adreno_switch_to_unsecure_mode(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb)
{
	unsigned int *cmds;
	int ret;

	cmds = adreno_ringbuffer_allocspace(rb, 2);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);
	if (cmds == NULL)
		return -ENOSPC;

	cmds += cp_secure_mode(adreno_dev, cmds, 0);

	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
	if (ret)
		adreno_spin_idle_debug(adreno_dev,
			"Switch to unsecure failed to idle\n");

	return ret;
}

int adreno_set_unsecured_mode(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb)
{
	int ret = 0;

	if (!adreno_is_a5xx(adreno_dev) && !adreno_is_a6xx(adreno_dev))
		return -EINVAL;

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS) &&
		adreno_is_a5xx(adreno_dev)) {
		ret = a5xx_critical_packet_submit(adreno_dev, rb);
		if (ret)
			return ret;
	}

	/* GPU comes up in secured mode, make it unsecured by default */
	if (adreno_dev->zap_loaded)
		ret = adreno_switch_to_unsecure_mode(adreno_dev, rb);
	else
		adreno_writereg(adreno_dev,
			ADRENO_REG_RBBM_SECVID_TRUST_CONTROL, 0x0);

	return ret;
}

/**
 * _adreno_start - Power up the GPU and prepare to accept commands
 * @adreno_dev: Pointer to an adreno_device structure
 *
 * The core function that powers up and initializes the GPU. This function is
 * called at init and after coming out of SLUMBER
 */
static int _adreno_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int status = -EINVAL, ret;
	unsigned int state = device->state;
	bool regulator_left_on;
	unsigned int pmqos_wakeup_vote = device->pwrctrl.pm_qos_wakeup_latency;
	unsigned int pmqos_active_vote = device->pwrctrl.pm_qos_active_latency;

	/* make sure ADRENO_DEVICE_STARTED is not set here */
	BUG_ON(test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv));

	/* disallow l2pc during wake up to improve GPU wake up time */
	kgsl_pwrctrl_update_l2pc(&adreno_dev->dev,
			KGSL_L2PC_WAKEUP_TIMEOUT);

	pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
			pmqos_wakeup_vote);

	regulator_left_on = regulators_left_on(device);

	/* Clear any GPU faults that might have been left over */
	adreno_clear_gpu_fault(adreno_dev);

	/* Put the GPU in a responsive state */
	status = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
	if (status)
		goto error_pwr_off;

	/* Set the bit to indicate that we've just powered on */
	set_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv);

	/* Soft reset the GPU if a regulator is stuck on */
	if (regulator_left_on)
		_soft_reset(adreno_dev);

	adreno_ringbuffer_set_global(adreno_dev, 0);

	status = kgsl_mmu_start(device);
	if (status)
		goto error_pwr_off;

	_set_secvid(device);

	status = adreno_ocmem_malloc(adreno_dev);
	if (status) {
		KGSL_DRV_ERR(device, "OCMEM malloc failed\n");
		goto error_mmu_off;
	}

	/* Send OOB request to turn on the GX */
	if (gpudev->oob_set) {
		status = gpudev->oob_set(adreno_dev, OOB_GPUSTART_SET_MASK,
				OOB_GPUSTART_CHECK_MASK,
				OOB_GPUSTART_CLEAR_MASK);
		if (status)
			goto error_mmu_off;
	}

	/* Enable 64 bit gpu addr if feature is set */
	if (gpudev->enable_64bit &&
		adreno_support_64bit(adreno_dev))
		gpudev->enable_64bit(adreno_dev);

	if (adreno_dev->perfctr_pwr_lo == 0) {
		ret = adreno_perfcounter_get(adreno_dev,
			KGSL_PERFCOUNTER_GROUP_PWR, 1,
			&adreno_dev->perfctr_pwr_lo, NULL,
			PERFCOUNTER_FLAG_KERNEL);

		if (ret) {
			WARN_ONCE(1, "Unable to get perf counters for DCVS\n");
			adreno_dev->perfctr_pwr_lo = 0;
		}
	}

	if (device->pwrctrl.bus_control) {
		/* VBIF waiting for RAM */
		if (adreno_dev->starved_ram_lo == 0) {
			ret = adreno_perfcounter_get(adreno_dev,
				KGSL_PERFCOUNTER_GROUP_VBIF_PWR, 0,
				&adreno_dev->starved_ram_lo, NULL,
				PERFCOUNTER_FLAG_KERNEL);

			if (ret) {
				KGSL_DRV_ERR(device,
					"Unable to get perf counters for bus DCVS\n");
				adreno_dev->starved_ram_lo = 0;
			}
		}

		/* VBIF DDR cycles */
		if (adreno_dev->ram_cycles_lo == 0) {
			ret = adreno_perfcounter_get(adreno_dev,
				KGSL_PERFCOUNTER_GROUP_VBIF,
				VBIF_AXI_TOTAL_BEATS,
				&adreno_dev->ram_cycles_lo, NULL,
				PERFCOUNTER_FLAG_KERNEL);

			if (ret) {
				KGSL_DRV_ERR(device,
					"Unable to get perf counters for bus DCVS\n");
				adreno_dev->ram_cycles_lo = 0;
			}
		}
	}

	/* Clear the busy_data stats - we're starting over from scratch */
	adreno_dev->busy_data.gpu_busy = 0;
	adreno_dev->busy_data.vbif_ram_cycles = 0;
	adreno_dev->busy_data.vbif_starved_ram = 0;

	/* Restore performance counter registers with saved values */
	adreno_perfcounter_restore(adreno_dev);

	/* Start the GPU */
	gpudev->start(adreno_dev);

	/*
	 * The system cache control registers
	 * live on the CX rail. Hence need
	 * reprogramming every time the GPU
	 * comes out of power collapse.
	 */
	adreno_llc_setup(device);

	/* Re-initialize the coresight registers if applicable */
	adreno_coresight_start(adreno_dev);

	adreno_irqctrl(adreno_dev, 1);

	adreno_perfcounter_start(adreno_dev);

	/* Clear FSR here in case it is set from a previous pagefault */
	kgsl_mmu_clear_fsr(&device->mmu);

	status = adreno_ringbuffer_start(adreno_dev, ADRENO_START_COLD);
	if (status)
		goto error_oob_clear;

	/* Start the dispatcher */
	adreno_dispatcher_start(device);

	device->reset_counter++;

	set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);

	if (pmqos_active_vote != pmqos_wakeup_vote)
		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
			pmqos_active_vote);

	/* Send OOB request to allow IFPC */
	if (gpudev->oob_clear)
		gpudev->oob_clear(adreno_dev, OOB_GPUSTART_CLEAR_MASK);

	return 0;

error_oob_clear:
	if (gpudev->oob_clear)
		gpudev->oob_clear(adreno_dev, OOB_GPUSTART_CLEAR_MASK);

error_mmu_off:
	kgsl_mmu_stop(&device->mmu);

error_pwr_off:
	/* set the state back to original state */
	kgsl_pwrctrl_change_state(device, state);

	if (pmqos_active_vote != pmqos_wakeup_vote)
		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
			pmqos_active_vote);

	return status;
}

/**
 * adreno_start() - Power up and initialize the GPU
 * @device: Pointer to the KGSL device to power up
 * @priority: Boolean flag to specify whether the start should be scheduled
 * in a low latency work queue
 *
 * Power up the GPU and initialize it. If priority is specified then elevate
 * the thread priority for the duration of the start operation
 */
int adreno_start(struct kgsl_device *device, int priority)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int nice = task_nice(current);
	int ret;

	if (priority && (adreno_wake_nice < nice))
		set_user_nice(current, adreno_wake_nice);

	ret = _adreno_start(adreno_dev);

	if (priority)
		set_user_nice(current, nice);

	return ret;
}

static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
{
	int i;
	struct adreno_ringbuffer *rb;

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		if (rb->drawctxt_active)
			kgsl_context_put(&(rb->drawctxt_active->base));
		rb->drawctxt_active = NULL;

		kgsl_sharedmem_writel(KGSL_DEVICE(adreno_dev),
			&rb->pagetable_desc, PT_INFO_OFFSET(current_rb_ptname),
			0);
	}
}

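/*
 * adreno_stop() - Quiesce and power down the GPU
 * @device: Pointer to the KGSL device
 *
 * Drop the active contexts, stop the dispatcher and ringbuffers, mask
 * interrupts, release OCMEM and LLC slices, save coresight and performance
 * counter state, drain the VBIF and stop the MMU.
 */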
static int adreno_stop(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
		return 0;

	adreno_set_active_ctxs_null(adreno_dev);

	adreno_dispatcher_stop(adreno_dev);

	adreno_ringbuffer_stop(adreno_dev);

	kgsl_pwrscale_update_stats(device);

	adreno_irqctrl(adreno_dev, 0);

	adreno_ocmem_free(adreno_dev);

	if (adreno_dev->gpu_llc_slice)
		adreno_llc_deactivate_slice(adreno_dev->gpu_llc_slice);
	if (adreno_dev->gpuhtw_llc_slice)
		adreno_llc_deactivate_slice(adreno_dev->gpuhtw_llc_slice);

	/* Save active coresight registers if applicable */
	adreno_coresight_stop(adreno_dev);

	/* Save physical performance counter values before GPU power down */
	adreno_perfcounter_save(adreno_dev);

	adreno_vbif_clear_pending_transactions(device);

	kgsl_mmu_stop(&device->mmu);

	clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);

	return 0;
}

static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/*
	 * Do not do soft reset for an IOMMU fault (because the IOMMU hardware
	 * needs a reset too) or for the A304 because it can't do SMMU
	 * programming of any kind after a soft reset
	 */
	if ((fault & ADRENO_IOMMU_PAGE_FAULT) || adreno_is_a304(adreno_dev))
		return false;

	return true;
}

/**
 * adreno_reset() - Helper function to reset the GPU
 * @device: Pointer to the KGSL device structure for the GPU
 * @fault: Type of fault. Needed to skip soft reset for MMU fault
 *
 * Try to reset the GPU to recover from a fault. First, try to do a low latency
 * soft reset. If the soft reset fails for some reason, then bring out the big
 * guns and toggle the footswitch.
 */
int adreno_reset(struct kgsl_device *device, int fault)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret = -EINVAL;
	int i = 0;

	/* Try soft reset first */
	if (adreno_try_soft_reset(device, fault)) {
		/* Make sure VBIF is cleared before resetting */
		ret = adreno_vbif_clear_pending_transactions(device);

		if (ret == 0) {
			ret = adreno_soft_reset(device);
			if (ret)
				KGSL_DEV_ERR_ONCE(device,
					"Device soft reset failed\n");
		}
	}
	if (ret) {
		/* If soft reset failed/skipped, then pull the power */
		kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
		/* since device is officially off now clear start bit */
		clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);

		/* Keep trying to start the device until it works */
		for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) {
			ret = adreno_start(device, 0);
			if (!ret)
				break;

			msleep(20);
		}
	}
	if (ret)
		return ret;

	if (i != 0)
		KGSL_DRV_WARN(device, "Device hard reset tried %d times\n", i);

	/*
	 * If active_cnt is non-zero then the system was active before
	 * going into a reset - put it back in that state
	 */

	if (atomic_read(&device->active_cnt))
		kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
	else
		kgsl_pwrctrl_change_state(device, KGSL_STATE_NAP);

	return ret;
}

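/*
 * A rough sketch of how userspace reaches adreno_getproperty(), assuming
 * the usual msm_kgsl.h uapi (IOCTL_KGSL_DEVICE_GETPROPERTY and struct
 * kgsl_device_getproperty); treat the snippet as illustrative, not
 * authoritative:
 *
 *	struct kgsl_devinfo devinfo = { 0 };
 *	struct kgsl_device_getproperty prop = {
 *		.type = KGSL_PROP_DEVICE_INFO,
 *		.value = &devinfo,
 *		.sizebytes = sizeof(devinfo),
 *	};
 *	ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop);
 */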
static int adreno_getproperty(struct kgsl_device *device,
				unsigned int type,
				void __user *value,
				size_t sizebytes)
{
	int status = -EINVAL;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	switch (type) {
	case KGSL_PROP_DEVICE_INFO:
		{
			struct kgsl_devinfo devinfo;

			if (sizebytes != sizeof(devinfo)) {
				status = -EINVAL;
				break;
			}

			memset(&devinfo, 0, sizeof(devinfo));
			devinfo.device_id = device->id + 1;
			devinfo.chip_id = adreno_dev->chipid;
			devinfo.mmu_enabled =
				MMU_FEATURE(&device->mmu, KGSL_MMU_PAGED);
			devinfo.gmem_gpubaseaddr = adreno_dev->gmem_base;
			devinfo.gmem_sizebytes = adreno_dev->gmem_size;

			if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
					0) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_DEVICE_SHADOW:
		{
			struct kgsl_shadowprop shadowprop;

			if (sizebytes != sizeof(shadowprop)) {
				status = -EINVAL;
				break;
			}
			memset(&shadowprop, 0, sizeof(shadowprop));
			if (device->memstore.hostptr) {
				/*
				 * NOTE: with mmu enabled, gpuaddr doesn't mean
				 * anything to mmap().
				 */
				shadowprop.gpuaddr =
					(unsigned int) device->memstore.gpuaddr;
				shadowprop.size = device->memstore.size;
				/*
				 * GSL needs this to be set, even if it
				 * appears to be meaningless
				 */
				shadowprop.flags = KGSL_FLAGS_INITIALIZED |
					KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS;
			}
			if (copy_to_user(value, &shadowprop,
				sizeof(shadowprop))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_DEVICE_QDSS_STM:
		{
			struct kgsl_qdss_stm_prop qdssprop = {0};
			struct kgsl_memdesc *qdss_desc =
				kgsl_mmu_get_qdss_global_entry(device);

			if (sizebytes != sizeof(qdssprop)) {
				status = -EINVAL;
				break;
			}

			if (qdss_desc) {
				qdssprop.gpuaddr = qdss_desc->gpuaddr;
				qdssprop.size = qdss_desc->size;
			}

			if (copy_to_user(value, &qdssprop,
				sizeof(qdssprop))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_DEVICE_QTIMER:
		{
			struct kgsl_qtimer_prop qtimerprop = {0};
			struct kgsl_memdesc *qtimer_desc =
				kgsl_mmu_get_qtimer_global_entry(device);

			if (sizebytes != sizeof(qtimerprop)) {
				status = -EINVAL;
				break;
			}

			if (qtimer_desc) {
				qtimerprop.gpuaddr = qtimer_desc->gpuaddr;
				qtimerprop.size = qtimer_desc->size;
			}

			if (copy_to_user(value, &qtimerprop,
				sizeof(qtimerprop))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_MMU_ENABLE:
		{
			/* Report MMU only if we can handle paged memory */
			int mmu_prop = MMU_FEATURE(&device->mmu,
				KGSL_MMU_PAGED);

			if (sizebytes < sizeof(mmu_prop)) {
				status = -EINVAL;
				break;
			}
			if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_INTERRUPT_WAITS:
		{
			int int_waits = 1;

			if (sizebytes != sizeof(int)) {
				status = -EINVAL;
				break;
			}
			if (copy_to_user(value, &int_waits, sizeof(int))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_UCHE_GMEM_VADDR:
		{
			uint64_t gmem_vaddr = 0;

			if (adreno_is_a5xx(adreno_dev) ||
					adreno_is_a6xx(adreno_dev))
				gmem_vaddr = ADRENO_UCHE_GMEM_BASE;
			if (sizebytes != sizeof(uint64_t)) {
				status = -EINVAL;
				break;
			}
			if (copy_to_user(value, &gmem_vaddr,
					sizeof(uint64_t))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_SP_GENERIC_MEM:
		{
			struct kgsl_sp_generic_mem sp_mem;

			if (sizebytes != sizeof(sp_mem)) {
				status = -EINVAL;
				break;
			}
			memset(&sp_mem, 0, sizeof(sp_mem));

			sp_mem.local = adreno_dev->sp_local_gpuaddr;
			sp_mem.pvt = adreno_dev->sp_pvt_gpuaddr;

			if (copy_to_user(value, &sp_mem, sizeof(sp_mem))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_UCODE_VERSION:
		{
			struct kgsl_ucode_version ucode;

			if (sizebytes != sizeof(ucode)) {
				status = -EINVAL;
				break;
			}
			memset(&ucode, 0, sizeof(ucode));

			ucode.pfp = adreno_dev->fw[ADRENO_FW_PFP].version;
			ucode.pm4 = adreno_dev->fw[ADRENO_FW_PM4].version;

			if (copy_to_user(value, &ucode, sizeof(ucode))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_GPMU_VERSION:
		{
			struct kgsl_gpmu_version gpmu;

			if (adreno_dev->gpucore == NULL) {
				status = -EINVAL;
				break;
			}

			if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
				status = -EOPNOTSUPP;
				break;
			}

			if (sizebytes != sizeof(gpmu)) {
				status = -EINVAL;
				break;
			}
			memset(&gpmu, 0, sizeof(gpmu));

			gpmu.major = adreno_dev->gpucore->gpmu_major;
			gpmu.minor = adreno_dev->gpucore->gpmu_minor;
			gpmu.features = adreno_dev->gpucore->gpmu_features;

			if (copy_to_user(value, &gpmu, sizeof(gpmu))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_HIGHEST_BANK_BIT:
		{
			unsigned int bit;

			if (sizebytes < sizeof(unsigned int)) {
				status = -EINVAL;
				break;
			}

			if (of_property_read_u32(device->pdev->dev.of_node,
				"qcom,highest-bank-bit", &bit)) {
				status = -EINVAL;
				break;
			}

			if (copy_to_user(value, &bit, sizeof(bit))) {
				status = -EFAULT;
				break;
			}
		}
		status = 0;
		break;
	case KGSL_PROP_MIN_ACCESS_LENGTH:
		{
			unsigned int mal;

			if (sizebytes < sizeof(unsigned int)) {
				status = -EINVAL;
				break;
			}

			if (of_property_read_u32(device->pdev->dev.of_node,
				"qcom,min-access-length", &mal)) {
				mal = 0;
			}

			if (copy_to_user(value, &mal, sizeof(mal))) {
				status = -EFAULT;
				break;
			}
		}
		status = 0;
		break;
	case KGSL_PROP_UBWC_MODE:
		{
			unsigned int mode;

			if (sizebytes < sizeof(unsigned int)) {
				status = -EINVAL;
				break;
			}

			if (of_property_read_u32(device->pdev->dev.of_node,
				"qcom,ubwc-mode", &mode))
				mode = 0;

			if (copy_to_user(value, &mode, sizeof(mode))) {
				status = -EFAULT;
				break;
			}
		}
		status = 0;
		break;

	case KGSL_PROP_DEVICE_BITNESS:
		{
			unsigned int bitness = 32;

			if (sizebytes != sizeof(unsigned int)) {
				status = -EINVAL;
				break;
			}
			/* Number of address bits used by the GPU */
			if (adreno_support_64bit(adreno_dev))
				bitness = 48;

			if (copy_to_user(value, &bitness,
					sizeof(unsigned int))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;

	default:
		status = -EINVAL;
	}

	return status;
}

int adreno_set_constraint(struct kgsl_device *device,
				struct kgsl_context *context,
				struct kgsl_device_constraint *constraint)
{
	int status = 0;

	switch (constraint->type) {
	case KGSL_CONSTRAINT_PWRLEVEL: {
		struct kgsl_device_constraint_pwrlevel pwr;

		if (constraint->size != sizeof(pwr)) {
			status = -EINVAL;
			break;
		}

		if (copy_from_user(&pwr,
				(void __user *)constraint->data,
				sizeof(pwr))) {
			status = -EFAULT;
			break;
		}
		if (pwr.level >= KGSL_CONSTRAINT_PWR_MAXLEVELS) {
			status = -EINVAL;
			break;
		}

		context->pwr_constraint.type =
			KGSL_CONSTRAINT_PWRLEVEL;
		context->pwr_constraint.sub_type = pwr.level;
		trace_kgsl_user_pwrlevel_constraint(device,
			context->id,
			context->pwr_constraint.type,
			context->pwr_constraint.sub_type);
		}
		break;
	case KGSL_CONSTRAINT_NONE:
		if (context->pwr_constraint.type == KGSL_CONSTRAINT_PWRLEVEL)
			trace_kgsl_user_pwrlevel_constraint(device,
				context->id,
				KGSL_CONSTRAINT_NONE,
				context->pwr_constraint.sub_type);
		context->pwr_constraint.type = KGSL_CONSTRAINT_NONE;
		break;

	default:
		status = -EINVAL;
		break;
	}

	/* If a new constraint has been set for a context, cancel the old one */
	if ((status == 0) &&
		(context->id == device->pwrctrl.constraint.owner_id)) {
		trace_kgsl_constraint(device, device->pwrctrl.constraint.type,
					device->pwrctrl.active_pwrlevel, 0);
		device->pwrctrl.constraint.type = KGSL_CONSTRAINT_NONE;
	}

	return status;
}

static int adreno_setproperty(struct kgsl_device_private *dev_priv,
				unsigned int type,
				void __user *value,
				unsigned int sizebytes)
{
	int status = -EINVAL;
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	switch (type) {
	case KGSL_PROP_PWRCTRL: {
		unsigned int enable;

		if (sizebytes != sizeof(enable))
			break;

		if (copy_from_user(&enable, value, sizeof(enable))) {
			status = -EFAULT;
			break;
		}

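		/*
		 * A non-zero value hands power management back to the
		 * kernel: clear the user ctrl_flags, restart soft fault
		 * detection and re-enable pwrscale. Zero pins the GPU
		 * active with power forced on.
		 */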
		mutex_lock(&device->mutex);

		if (enable) {
			device->pwrctrl.ctrl_flags = 0;

			if (!kgsl_active_count_get(device)) {
				adreno_fault_detect_start(adreno_dev);
				kgsl_active_count_put(device);
			}

			kgsl_pwrscale_enable(device);
		} else {
			kgsl_pwrctrl_change_state(device,
					KGSL_STATE_ACTIVE);
			device->pwrctrl.ctrl_flags = KGSL_PWR_ON;
			adreno_fault_detect_stop(adreno_dev);
			kgsl_pwrscale_disable(device, true);
		}

		mutex_unlock(&device->mutex);
		status = 0;
		}
		break;
	case KGSL_PROP_PWR_CONSTRAINT: {
		struct kgsl_device_constraint constraint;
		struct kgsl_context *context;

		if (sizebytes != sizeof(constraint))
			break;

		if (copy_from_user(&constraint, value,
			sizeof(constraint))) {
			status = -EFAULT;
			break;
		}

		context = kgsl_context_get_owner(dev_priv,
						constraint.context_id);

		if (context == NULL)
			break;

		status = adreno_set_constraint(device, context,
						&constraint);

		kgsl_context_put(context);
		}
		break;
	default:
		break;
	}

	return status;
}

/*
 * adreno_irq_pending() - Checks if an interrupt is generated by hardware
 * @adreno_dev: Pointer to the device whose interrupts are checked
 *
 * Returns 1 if an interrupt is pending from the device, else 0.
 */
inline unsigned int adreno_irq_pending(struct adreno_device *adreno_dev)
{
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int status;

	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);

	/*
	 * The IRQ handler clears the RBBM INT0 status register immediately
	 * on entering the ISR, before actually servicing the interrupt, so
	 * we can't rely on the RBBM INT0 status alone. Use
	 * pending_irq_refcnt along with RBBM INT0 to correctly determine
	 * whether any IRQ is pending.
	 */
	if ((status & gpudev->irq->mask) ||
			atomic_read(&adreno_dev->pending_irq_refcnt))
		return 1;
	else
		return 0;
}

/**
 * adreno_hw_isidle() - Check if the GPU core is idle
 * @adreno_dev: Pointer to the Adreno device structure for the GPU
 *
 * Return true if the RBBM status register for the GPU type indicates that the
 * hardware is idle
 */
bool adreno_hw_isidle(struct adreno_device *adreno_dev)
{
	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
	unsigned int reg_rbbm_status;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	/* if the hw driver implements an idle check - use it */
	if (gpudev->hw_isidle)
		return gpudev->hw_isidle(adreno_dev);

	if (adreno_is_a540(adreno_dev))
		/*
		 * Due to CRC idle throttling the GPU idle hysteresis
		 * can take up to 3 usec to expire - account for it
		 */
		udelay(5);

	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
		&reg_rbbm_status);

	if (reg_rbbm_status & gpucore->busy_mask)
		return false;

	/* Don't consider ourselves idle if there is an IRQ pending */
	if (adreno_irq_pending(adreno_dev))
		return false;

	return true;
}

/**
 * adreno_soft_reset() - Do a soft reset of the GPU hardware
 * @device: KGSL device to soft reset
 *
 * "soft reset" the GPU hardware - this is a fast path GPU reset
 * The GPU hardware is reset but we never pull power so we can skip
 * a lot of the standard adreno_stop/adreno_start sequence
 */
int adreno_soft_reset(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int ret;

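	/*
	 * On targets that implement it, oob_set/oob_clear appear to
	 * bracket the reset with an out-of-band CP-init handshake so the
	 * power management firmware leaves the GPU alone mid-reset; this
	 * reading is inferred from the OOB_CPINIT masks, not documented.
	 */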
	if (gpudev->oob_set) {
		ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
				OOB_CPINIT_CHECK_MASK,
				OOB_CPINIT_CLEAR_MASK);
		if (ret)
			return ret;
	}

	kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
	adreno_set_active_ctxs_null(adreno_dev);

	adreno_irqctrl(adreno_dev, 0);

	adreno_clear_gpu_fault(adreno_dev);
	/* since device is officially off now clear start bit */
	clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);

	/* save physical performance counter values before GPU soft reset */
	adreno_perfcounter_save(adreno_dev);

	/* Reset the GPU */
	if (gpudev->soft_reset)
		ret = gpudev->soft_reset(adreno_dev);
	else
		ret = _soft_reset(adreno_dev);
	if (ret) {
		if (gpudev->oob_clear)
			gpudev->oob_clear(adreno_dev, OOB_CPINIT_CLEAR_MASK);
		return ret;
	}

	/* Clear the busy_data stats - we're starting over from scratch */
	adreno_dev->busy_data.gpu_busy = 0;
	adreno_dev->busy_data.vbif_ram_cycles = 0;
	adreno_dev->busy_data.vbif_starved_ram = 0;

	/* Set the page table back to the default page table */
	adreno_ringbuffer_set_global(adreno_dev, 0);
	kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable);

	_set_secvid(device);

	/* Enable 64 bit gpu addr if feature is set */
	if (gpudev->enable_64bit &&
			adreno_support_64bit(adreno_dev))
		gpudev->enable_64bit(adreno_dev);

	/* Reinitialize the GPU */
	gpudev->start(adreno_dev);

	/* Re-initialize the coresight registers if applicable */
	adreno_coresight_start(adreno_dev);

	/* Enable IRQ */
	adreno_irqctrl(adreno_dev, 1);

	/* stop all ringbuffers to cancel RB events */
	adreno_ringbuffer_stop(adreno_dev);
	/*
	 * If we have offsets for the jump tables we can try to do a warm start,
	 * otherwise do a full ringbuffer restart
	 */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_WARM_START))
		ret = adreno_ringbuffer_start(adreno_dev, ADRENO_START_WARM);
	else
		ret = adreno_ringbuffer_start(adreno_dev, ADRENO_START_COLD);
	if (ret == 0) {
		device->reset_counter++;
		set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
	}

	/* Restore physical performance counter values after soft reset */
	adreno_perfcounter_restore(adreno_dev);

	if (gpudev->oob_clear)
		gpudev->oob_clear(adreno_dev, OOB_CPINIT_CLEAR_MASK);

	return ret;
}

/*
 * adreno_isidle() - return true if the GPU hardware is idle
 * @device: Pointer to the KGSL device structure for the GPU
 *
 * Return true if the GPU hardware is idle and there are no commands pending in
 * the ringbuffer
 */
bool adreno_isidle(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb;
	int i;

	if (!kgsl_state_is_awake(device))
		return true;

	/*
	 * wptr is updated when we add commands to the ringbuffer; add a
	 * barrier to make sure the updated wptr is compared to rptr
	 */
	smp_mb();

	/*
	 * The ringbuffers are truly idle only when the read and write
	 * pointers of all ringbuffers are equal
	 */
	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		if (!adreno_rb_empty(rb))
			return false;
	}

	return adreno_hw_isidle(adreno_dev);
}

/* Print some key registers if a spin-for-idle times out */
void adreno_spin_idle_debug(struct adreno_device *adreno_dev,
		const char *str)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int rptr, wptr;
	unsigned int status, status3, intstatus;
	unsigned int hwfault;

	/* Use an explicit "%s" so the caller's string is never treated as a
	 * format string
	 */
	dev_err(device->dev, "%s", str);

	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);
	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);

	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS, &status);
	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS3, &status3);
	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &intstatus);
	adreno_readreg(adreno_dev, ADRENO_REG_CP_HW_FAULT, &hwfault);

	dev_err(device->dev,
		"rb=%d pos=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
		adreno_dev->cur_rb->id, rptr, wptr, status, status3, intstatus);

	dev_err(device->dev, " hwfault=%8.8X\n", hwfault);

	kgsl_device_snapshot(device, NULL, adreno_gmu_gpu_fault(adreno_dev));
}

/**
 * adreno_spin_idle() - Spin wait for the GPU to idle
 * @adreno_dev: Pointer to an adreno device
 * @timeout: milliseconds to wait before returning error
 *
 * Spin the CPU waiting for the RBBM status to return idle
 */
int adreno_spin_idle(struct adreno_device *adreno_dev, unsigned int timeout)
{
	unsigned long wait = jiffies + msecs_to_jiffies(timeout);

	do {
		/*
		 * If we fault, stop waiting and return an error. The dispatcher
		 * will clean up the fault from the work queue, but we need to
		 * make sure we don't block it by waiting for an idle that
		 * will never come.
		 */
		if (adreno_gpu_fault(adreno_dev) != 0)
			return -EDEADLK;

		if (adreno_isidle(KGSL_DEVICE(adreno_dev)))
			return 0;

	} while (time_before(jiffies, wait));

	/*
	 * Under rare conditions, preemption can cause the while loop to exit
	 * without checking if the GPU is idle. Check one last time before we
	 * return failure.
	 */
	if (adreno_gpu_fault(adreno_dev) != 0)
		return -EDEADLK;

	if (adreno_isidle(KGSL_DEVICE(adreno_dev)))
		return 0;

	return -ETIMEDOUT;
}

/**
 * adreno_idle() - wait for the GPU hardware to go idle
 * @device: Pointer to the KGSL device structure for the GPU
 *
 * Wait up to ADRENO_IDLE_TIMEOUT milliseconds for the GPU hardware to go quiet.
 * Caller must hold the device mutex, and must not hold the dispatcher mutex.
 */
int adreno_idle(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret;

	/*
	 * Make sure the device mutex is held so the dispatcher can't send any
	 * more commands to the hardware
	 */
	if (WARN_ON(!mutex_is_locked(&device->mutex)))
		return -EDEADLK;

	/* Check if we are already idle before idling the dispatcher */
	if (adreno_isidle(device))
		return 0;
	/*
	 * Wait for the dispatcher to finish completing commands
	 * already submitted
	 */
	ret = adreno_dispatcher_idle(adreno_dev);
	if (ret)
		return ret;

	return adreno_spin_idle(adreno_dev, ADRENO_IDLE_TIMEOUT);
}

/**
 * adreno_drain() - Drain the dispatch queue
 * @device: Pointer to the KGSL device structure for the GPU
 *
 * Drain the dispatcher of existing drawobjs. This halts
 * additional commands from being issued until the gate is completed.
 */
static int adreno_drain(struct kgsl_device *device)
{
	reinit_completion(&device->halt_gate);

	return 0;
}

/* Caller must hold the device mutex. */
static int adreno_suspend_context(struct kgsl_device *device)
{
	/* process any profiling results that are available */
	adreno_profile_process_results(ADRENO_DEVICE(device));

	/* Wait for the device to go idle */
	return adreno_idle(device);
}

/**
 * adreno_read - General read function to read adreno device memory
 * @device - Pointer to the GPU device struct (for adreno device)
 * @base - Base address (kernel virtual) where the device memory is mapped
 * @offsetwords - Offset in words from the base address, of the memory that
 * is to be read
 * @value - Value read from the device memory
 * @mem_len - Length of the device memory mapped to the kernel
 */
static void adreno_read(struct kgsl_device *device, void __iomem *base,
		unsigned int offsetwords, unsigned int *value,
		unsigned int mem_len)
{
	void __iomem *reg;

	/* Make sure we're not reading from invalid memory */
	if (WARN(offsetwords * sizeof(uint32_t) >= mem_len,
		"Out of bounds register read: 0x%x/0x%x\n",
			offsetwords, mem_len >> 2))
		return;

	reg = (base + (offsetwords << 2));

	if (!in_interrupt())
		kgsl_pre_hwaccess(device);

	*value = __raw_readl(reg);
	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
}

/**
 * adreno_regread - Used to read adreno device registers
 * @device - GPU device whose register is to be read
 * @offsetwords - Word (4 Bytes) offset to the register to be read
 * @value - Value read from device register
 */
static void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
	unsigned int *value)
{
	adreno_read(device, device->reg_virt, offsetwords, value,
		device->reg_len);
}

/**
 * adreno_shadermem_regread - Used to read GPU (adreno) shader memory
 * @device - GPU device whose shader memory is to be read
 * @offsetwords - Offset in words, of the shader memory address to be read
 * @value - Pointer to where the read shader mem value is to be stored
 */
void adreno_shadermem_regread(struct kgsl_device *device,
	unsigned int offsetwords, unsigned int *value)
{
	adreno_read(device, device->shader_mem_virt, offsetwords, value,
		device->shader_mem_len);
}

static void adreno_regwrite(struct kgsl_device *device,
				unsigned int offsetwords,
				unsigned int value)
{
	void __iomem *reg;

	/* Make sure we're not writing to an invalid register */
	if (WARN(offsetwords * sizeof(uint32_t) >= device->reg_len,
		"Out of bounds register write: 0x%x/0x%x\n",
			offsetwords, device->reg_len >> 2))
		return;

	if (!in_interrupt())
		kgsl_pre_hwaccess(device);

	trace_kgsl_regwrite(device, offsetwords, value);

	reg = (device->reg_virt + (offsetwords << 2));

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	wmb();
	__raw_writel(value, reg);
}

static void adreno_gmu_regwrite(struct kgsl_device *device,
				unsigned int offsetwords,
				unsigned int value)
{
	void __iomem *reg;
	struct gmu_device *gmu = &device->gmu;

	trace_kgsl_regwrite(device, offsetwords, value);

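	/*
	 * GMU registers are addressed in the GPU's register space;
	 * subtracting gmu2gpu_offset converts the offset into an index
	 * into the GMU's own register mapping (inferred from the name).
	 */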
	offsetwords -= gmu->gmu2gpu_offset;
	reg = gmu->reg_virt + (offsetwords << 2);

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	wmb();
	__raw_writel(value, reg);
}

static void adreno_gmu_regread(struct kgsl_device *device,
				unsigned int offsetwords,
				unsigned int *value)
{
	void __iomem *reg;
	struct gmu_device *gmu = &device->gmu;

	offsetwords -= gmu->gmu2gpu_offset;

	reg = gmu->reg_virt + (offsetwords << 2);

	*value = __raw_readl(reg);

	/*
	 * ensure this read finishes before the next one.
	 * i.e. act like normal readl()
	 */
	rmb();
}

/**
 * adreno_waittimestamp - sleep while waiting for the specified timestamp
 * @device - pointer to a KGSL device structure
 * @context - pointer to the active kgsl context
 * @timestamp - GPU timestamp to wait for
 * @msecs - amount of time to wait (in milliseconds)
 *
 * Wait up to 'msecs' milliseconds for the specified timestamp to expire.
 */
static int adreno_waittimestamp(struct kgsl_device *device,
		struct kgsl_context *context,
		unsigned int timestamp,
		unsigned int msecs)
{
	int ret;

	if (context == NULL) {
		/*
		 * If someone is still using the deprecated device-wide
		 * wait, complain about it once
		 */
		dev_WARN_ONCE(device->dev, 1,
			"IOCTL_KGSL_DEVICE_WAITTIMESTAMP is deprecated\n");
		return -ENOTTY;
	}

	/* Return -ENOENT if the context has been detached */
	if (kgsl_context_detached(context))
		return -ENOENT;

	ret = adreno_drawctxt_wait(ADRENO_DEVICE(device), context,
		timestamp, msecs);

	/* If the context got invalidated then return a specific error */
	if (kgsl_context_invalid(context))
		ret = -EDEADLK;

	/*
	 * Return -EPROTO if the device has faulted since the last time we
	 * checked. Userspace uses this as a marker for performing post
	 * fault activities
	 */
	if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv))
		ret = -EPROTO;

	return ret;
}

/**
 * __adreno_readtimestamp() - Reads the timestamp from memstore memory
 * @adreno_dev: Pointer to an adreno device
 * @index: Index into the memstore memory
 * @type: Type of timestamp to read
 * @timestamp: The out parameter where the timestamp is read
 */
static int __adreno_readtimestamp(struct adreno_device *adreno_dev, int index,
		int type, unsigned int *timestamp)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int status = 0;

	switch (type) {
	case KGSL_TIMESTAMP_CONSUMED:
		kgsl_sharedmem_readl(&device->memstore, timestamp,
			KGSL_MEMSTORE_OFFSET(index, soptimestamp));
		break;
	case KGSL_TIMESTAMP_RETIRED:
		kgsl_sharedmem_readl(&device->memstore, timestamp,
			KGSL_MEMSTORE_OFFSET(index, eoptimestamp));
		break;
	default:
		status = -EINVAL;
		*timestamp = 0;
		break;
	}
	return status;
}

/**
 * adreno_rb_readtimestamp(): Return the value of given type of timestamp
 * for a RB
 * @adreno_dev: adreno device whose timestamp values are being queried
 * @priv: The object being queried for a timestamp (expected to be a rb pointer)
 * @type: The type of timestamp (one of 3) to be read
 * @timestamp: Pointer to where the read timestamp is to be written to
 *
 * CONSUMED and RETIRED type timestamps are sorted by id and are constantly
 * updated by the GPU through shared memstore memory. QUEUED type timestamps
 * are read directly from context struct.
 *
 * The function returns 0 on success and timestamp value at the *timestamp
 * address and returns -EINVAL on any read error/invalid type and timestamp = 0.
 */
int adreno_rb_readtimestamp(struct adreno_device *adreno_dev,
		void *priv, enum kgsl_timestamp_type type,
		unsigned int *timestamp)
{
	int status = 0;
	struct adreno_ringbuffer *rb = priv;

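	/*
	 * QUEUED timestamps come straight from the RB struct; CONSUMED and
	 * RETIRED live in memstore past the per-context slots, hence the
	 * rb->id + KGSL_MEMSTORE_MAX index.
	 */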
	if (type == KGSL_TIMESTAMP_QUEUED)
		*timestamp = rb->timestamp;
	else
		status = __adreno_readtimestamp(adreno_dev,
			rb->id + KGSL_MEMSTORE_MAX,
			type, timestamp);

	return status;
}

/**
 * adreno_readtimestamp(): Return the value of given type of timestamp
 * @device: GPU device whose timestamp values are being queried
 * @priv: The object being queried for a timestamp (expected to be a context)
 * @type: The type of timestamp (one of 3) to be read
 * @timestamp: Pointer to where the read timestamp is to be written to
 *
 * CONSUMED and RETIRED type timestamps are sorted by id and are constantly
 * updated by the GPU through shared memstore memory. QUEUED type timestamps
 * are read directly from context struct.
 *
 * The function returns 0 on success and timestamp value at the *timestamp
 * address and returns -EINVAL on any read error/invalid type and timestamp = 0.
 */
static int adreno_readtimestamp(struct kgsl_device *device,
		void *priv, enum kgsl_timestamp_type type,
		unsigned int *timestamp)
{
	int status = 0;
	struct kgsl_context *context = priv;

	if (type == KGSL_TIMESTAMP_QUEUED) {
		struct adreno_context *ctxt = ADRENO_CONTEXT(context);

		*timestamp = ctxt->timestamp;
	} else
		status = __adreno_readtimestamp(ADRENO_DEVICE(device),
			context->id, type, timestamp);

	return status;
}

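/*
 * Convert GFX3D clock ticks to microseconds: freq arrives in Hz, so
 * dividing it by 10^6 yields ticks-per-microsecond.
 */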
static inline s64 adreno_ticks_to_us(u32 ticks, u32 freq)
{
	freq /= 1000000;
	return ticks / freq;
}

/**
 * adreno_power_stats() - Reads the counters needed for freq decisions
 * @device: Pointer to device whose counters are read
 * @stats: Pointer to stats set that needs updating
 * Power: The caller is expected to be in a clock enabled state as this
 * function does reg reads
 */
static void adreno_power_stats(struct kgsl_device *device,
				struct kgsl_power_stats *stats)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_busy_data *busy = &adreno_dev->busy_data;
	uint64_t adj = 0;

	memset(stats, 0, sizeof(*stats));

	/* Get the busy cycles counted since the counter was last reset */
	if (adreno_dev->perfctr_pwr_lo != 0) {
		uint64_t gpu_busy;

		gpu_busy = counter_delta(device, adreno_dev->perfctr_pwr_lo,
			&busy->gpu_busy);

		if (gpudev->read_throttling_counters) {
			adj = gpudev->read_throttling_counters(adreno_dev);
			gpu_busy += adj;
		}

		if (adreno_is_a6xx(adreno_dev)) {
			/* clock sourced from XO */
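			/*
			 * Assuming the 19.2 MHz always-on XO here:
			 * ticks * 10 / 192 == ticks / 19.2 == microseconds.
			 */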
			stats->busy_time = gpu_busy * 10 / 192;
		} else {
			/* clock sourced from GFX3D */
			stats->busy_time = adreno_ticks_to_us(gpu_busy,
				kgsl_pwrctrl_active_freq(pwr));
		}
	}

	if (device->pwrctrl.bus_control) {
		uint64_t ram_cycles = 0, starved_ram = 0;

		if (adreno_dev->ram_cycles_lo != 0)
			ram_cycles = counter_delta(device,
				adreno_dev->ram_cycles_lo,
				&busy->vbif_ram_cycles);

		if (adreno_dev->starved_ram_lo != 0)
			starved_ram = counter_delta(device,
				adreno_dev->starved_ram_lo,
				&busy->vbif_starved_ram);

		stats->ram_time = ram_cycles;
		stats->ram_wait = starved_ram;
	}
	if (adreno_dev->lm_threshold_count &&
			gpudev->count_throttles)
		gpudev->count_throttles(adreno_dev, adj);
}

static unsigned int adreno_gpuid(struct kgsl_device *device,
	unsigned int *chipid)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/*
	 * Some applications need to know the chip ID too, so pass
	 * that as a parameter
	 */
	if (chipid != NULL)
		*chipid = adreno_dev->chipid;

	/*
	 * Standard KGSL gpuid format:
	 * top word is 0x0002 for 2D or 0x0003 for 3D
	 * Bottom word is core specific identifier
	 */
	return (0x0003 << 16) | ADRENO_GPUREV(adreno_dev);
}

static int adreno_regulator_enable(struct kgsl_device *device)
{
	int ret = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (gpudev->regulator_enable &&
		!test_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
			&adreno_dev->priv)) {
		ret = gpudev->regulator_enable(adreno_dev);
		if (!ret)
			set_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
				&adreno_dev->priv);
	}
	return ret;
}

static bool adreno_is_hw_collapsible(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	/*
	 * Skip power collapse for the A304 if the power ctrl flags are
	 * non-zero: soft_reset does not work on the A304, so power
	 * collapse needs to be disabled to avoid a soft_reset.
	 */
	if (adreno_is_a304(adreno_dev) &&
			device->pwrctrl.ctrl_flags)
		return false;

	return adreno_isidle(device) && (gpudev->is_sptp_idle ?
				gpudev->is_sptp_idle(adreno_dev) : true);
}

static void adreno_regulator_disable(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (gpudev->regulator_disable &&
		test_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
			&adreno_dev->priv)) {
		gpudev->regulator_disable(adreno_dev);
		clear_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
			&adreno_dev->priv);
	}
}

static void adreno_pwrlevel_change_settings(struct kgsl_device *device,
		unsigned int prelevel, unsigned int postlevel, bool post)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (gpudev->pwrlevel_change_settings)
		gpudev->pwrlevel_change_settings(adreno_dev, prelevel,
			postlevel, post);
}

static void adreno_clk_set_options(struct kgsl_device *device, const char *name,
	struct clk *clk, bool on)
{
	if (ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options)
		ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options(
			ADRENO_DEVICE(device), name, clk, on);
}

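/*
 * adreno_iommu_sync() appears to signal the secure world, via an SCM
 * call (SCM_SVC_PWR, command 0x8), that an IOMMU power sync window is
 * beginning or ending; kgsl_mmu_sync serializes callers around that
 * window. These semantics are inferred from its use with
 * ADRENO_QUIRK_IOMMU_SYNC in adreno_regulator_disable_poll() below.
 */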
static void adreno_iommu_sync(struct kgsl_device *device, bool sync)
{
	struct scm_desc desc = {0};
	int ret;

	if (sync == true) {
		mutex_lock(&kgsl_mmu_sync);
		desc.args[0] = true;
		desc.arginfo = SCM_ARGS(1);
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_PWR, 0x8), &desc);
		if (ret)
			KGSL_DRV_ERR(device,
				"MMU sync with Hypervisor off %x\n", ret);
	} else {
		desc.args[0] = false;
		desc.arginfo = SCM_ARGS(1);
		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_PWR, 0x8), &desc);
		mutex_unlock(&kgsl_mmu_sync);
	}
}

static void _regulator_disable(struct kgsl_regulator *regulator, bool poll)
{
	unsigned long wait_time = jiffies + msecs_to_jiffies(200);

	if (IS_ERR_OR_NULL(regulator->reg))
		return;

	regulator_disable(regulator->reg);

	if (poll == false)
		return;

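	/* Poll for up to 200 ms for the regulator to actually drop */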
	while (!time_after(jiffies, wait_time)) {
		if (!regulator_is_enabled(regulator->reg))
			return;
		cpu_relax();
	}

	KGSL_CORE_ERR("regulator '%s' still on after 200ms\n", regulator->name);
}

static void adreno_regulator_disable_poll(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i;

	/* Fast path - hopefully we don't need this quirk */
	if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_IOMMU_SYNC)) {
		for (i = KGSL_MAX_REGULATORS - 1; i >= 0; i--)
			_regulator_disable(&pwr->regulators[i], false);
		return;
	}

	adreno_iommu_sync(device, true);

	for (i = 0; i < KGSL_MAX_REGULATORS; i++)
		_regulator_disable(&pwr->regulators[i], true);

	adreno_iommu_sync(device, false);
}

static void adreno_gpu_model(struct kgsl_device *device, char *str,
				size_t bufsz)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

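	/*
	 * The chip ID patch field is zero-based, so report it as a
	 * one-based "v" revision (patch level 1 becomes v2).
	 */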
	snprintf(str, bufsz, "Adreno%d%d%dv%d",
			ADRENO_CHIPID_CORE(adreno_dev->chipid),
			ADRENO_CHIPID_MAJOR(adreno_dev->chipid),
			ADRENO_CHIPID_MINOR(adreno_dev->chipid),
			ADRENO_CHIPID_PATCH(adreno_dev->chipid) + 1);
}

static const struct kgsl_functable adreno_functable = {
	/* Mandatory functions */
	.regread = adreno_regread,
	.regwrite = adreno_regwrite,
	.gmu_regread = adreno_gmu_regread,
	.gmu_regwrite = adreno_gmu_regwrite,
	.idle = adreno_idle,
	.isidle = adreno_isidle,
	.suspend_context = adreno_suspend_context,
	.init = adreno_init,
	.start = adreno_start,
	.stop = adreno_stop,
	.getproperty = adreno_getproperty,
	.getproperty_compat = adreno_getproperty_compat,
	.waittimestamp = adreno_waittimestamp,
	.readtimestamp = adreno_readtimestamp,
	.queue_cmds = adreno_dispatcher_queue_cmds,
	.ioctl = adreno_ioctl,
	.compat_ioctl = adreno_compat_ioctl,
	.power_stats = adreno_power_stats,
	.gpuid = adreno_gpuid,
	.snapshot = adreno_snapshot,
	.irq_handler = adreno_irq_handler,
	.drain = adreno_drain,
	/* Optional functions */
	.snapshot_gmu = adreno_snapshot_gmu,
	.drawctxt_create = adreno_drawctxt_create,
	.drawctxt_detach = adreno_drawctxt_detach,
	.drawctxt_destroy = adreno_drawctxt_destroy,
	.drawctxt_dump = adreno_drawctxt_dump,
	.setproperty = adreno_setproperty,
	.setproperty_compat = adreno_setproperty_compat,
	.drawctxt_sched = adreno_drawctxt_sched,
	.resume = adreno_dispatcher_start,
	.regulator_enable = adreno_regulator_enable,
	.is_hw_collapsible = adreno_is_hw_collapsible,
	.regulator_disable = adreno_regulator_disable,
	.pwrlevel_change_settings = adreno_pwrlevel_change_settings,
	.regulator_disable_poll = adreno_regulator_disable_poll,
	.clk_set_options = adreno_clk_set_options,
	.gpu_model = adreno_gpu_model,
	.stop_fault_timer = adreno_dispatcher_stop_fault_timer,
};

static struct platform_driver adreno_platform_driver = {
	.probe = adreno_probe,
	.remove = adreno_remove,
	.suspend = kgsl_suspend_driver,
	.resume = kgsl_resume_driver,
	.id_table = adreno_id_table,
	.driver = {
		.owner = THIS_MODULE,
		.name = DEVICE_3D_NAME,
		.pm = &kgsl_pm_ops,
		.of_match_table = adreno_match_table,
	}
};

static const struct of_device_id busmon_match_table[] = {
	{ .compatible = "qcom,kgsl-busmon", .data = &device_3d0 },
	{}
};

static int adreno_busmon_probe(struct platform_device *pdev)
{
	struct kgsl_device *device;
	const struct of_device_id *pdid =
		of_match_device(busmon_match_table, &pdev->dev);

	if (pdid == NULL)
		return -ENXIO;

	device = (struct kgsl_device *)pdid->data;
	device->busmondev = &pdev->dev;
	dev_set_drvdata(device->busmondev, device);

	return 0;
}

static struct platform_driver kgsl_bus_platform_driver = {
	.probe = adreno_busmon_probe,
	.driver = {
		.owner = THIS_MODULE,
		.name = "kgsl-busmon",
		.of_match_table = busmon_match_table,
	}
};

static int __init kgsl_3d_init(void)
{
	int ret;

	ret = platform_driver_register(&kgsl_bus_platform_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&adreno_platform_driver);
	if (ret)
		platform_driver_unregister(&kgsl_bus_platform_driver);

	return ret;
}

static void __exit kgsl_3d_exit(void)
{
	platform_driver_unregister(&adreno_platform_driver);
	platform_driver_unregister(&kgsl_bus_platform_driver);
}

module_init(kgsl_3d_init);
module_exit(kgsl_3d_exit);

MODULE_DESCRIPTION("3D Graphics driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:kgsl_3d");