Shrenuj Bansala419c792016-10-20 14:05:11 -07001/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/module.h>
14#include <linux/uaccess.h>
15#include <linux/sched.h>
16#include <linux/of.h>
17#include <linux/of_device.h>
18#include <linux/delay.h>
19#include <linux/input.h>
Lynus Vaz9ed8cf92017-09-21 21:55:34 +053020#include <linux/io.h>
Shrenuj Bansala419c792016-10-20 14:05:11 -070021#include <soc/qcom/scm.h>
22
23#include <linux/msm-bus-board.h>
24#include <linux/msm-bus.h>
25
26#include "kgsl.h"
27#include "kgsl_pwrscale.h"
28#include "kgsl_sharedmem.h"
29#include "kgsl_iommu.h"
30#include "kgsl_trace.h"
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -060031#include "adreno_llc.h"
Shrenuj Bansala419c792016-10-20 14:05:11 -070032
33#include "adreno.h"
34#include "adreno_iommu.h"
35#include "adreno_compat.h"
36#include "adreno_pm4types.h"
37#include "adreno_trace.h"
38
39#include "a3xx_reg.h"
Deepak Kumar84b9e032017-11-08 13:08:50 +053040#include "a6xx_reg.h"
Shrenuj Bansala419c792016-10-20 14:05:11 -070041#include "adreno_snapshot.h"
42
43/* Include the master list of GPU cores that are supported */
44#include "adreno-gpulist.h"
45#include "adreno_dispatch.h"
46
47#undef MODULE_PARAM_PREFIX
48#define MODULE_PARAM_PREFIX "adreno."
49
50static bool nopreempt;
51module_param(nopreempt, bool, 0444);
52MODULE_PARM_DESC(nopreempt, "Disable GPU preemption");
53
Shrenuj Bansalae672812016-02-24 14:17:30 -080054static bool swfdetect;
55module_param(swfdetect, bool, 0444);
56MODULE_PARM_DESC(swfdetect, "Enable soft fault detection");
57
Shrenuj Bansala419c792016-10-20 14:05:11 -070058#define DRIVER_VERSION_MAJOR 3
59#define DRIVER_VERSION_MINOR 1
60
Shrenuj Bansala419c792016-10-20 14:05:11 -070061#define KGSL_LOG_LEVEL_DEFAULT 3
62
63static void adreno_input_work(struct work_struct *work);
64static unsigned int counter_delta(struct kgsl_device *device,
65 unsigned int reg, unsigned int *counter);
66
67static struct devfreq_msm_adreno_tz_data adreno_tz_data = {
68 .bus = {
69 .max = 350,
70 },
71 .device_id = KGSL_DEVICE_3D0,
72};
73
74static const struct kgsl_functable adreno_functable;
75
76static struct adreno_device device_3d0 = {
77 .dev = {
78 KGSL_DEVICE_COMMON_INIT(device_3d0.dev),
79 .pwrscale = KGSL_PWRSCALE_INIT(&adreno_tz_data),
80 .name = DEVICE_3D0_NAME,
81 .id = KGSL_DEVICE_3D0,
Kyle Pieferb1027b02017-02-10 13:58:58 -080082 .gmu = {
83 .load_mode = TCM_BOOT,
84 },
Shrenuj Bansala419c792016-10-20 14:05:11 -070085 .pwrctrl = {
86 .irq_name = "kgsl_3d0_irq",
87 },
88 .iomemname = "kgsl_3d0_reg_memory",
89 .shadermemname = "kgsl_3d0_shader_memory",
90 .ftbl = &adreno_functable,
91 .cmd_log = KGSL_LOG_LEVEL_DEFAULT,
92 .ctxt_log = KGSL_LOG_LEVEL_DEFAULT,
93 .drv_log = KGSL_LOG_LEVEL_DEFAULT,
94 .mem_log = KGSL_LOG_LEVEL_DEFAULT,
95 .pwr_log = KGSL_LOG_LEVEL_DEFAULT,
96 },
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -070097 .fw[0] = {
98 .fwvirt = NULL
99 },
100 .fw[1] = {
101 .fwvirt = NULL
102 },
Shrenuj Bansala419c792016-10-20 14:05:11 -0700103 .gmem_size = SZ_256K,
Shrenuj Bansala419c792016-10-20 14:05:11 -0700104 .ft_policy = KGSL_FT_DEFAULT_POLICY,
105 .ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY,
Shrenuj Bansala419c792016-10-20 14:05:11 -0700106 .long_ib_detect = 1,
107 .input_work = __WORK_INITIALIZER(device_3d0.input_work,
108 adreno_input_work),
109 .pwrctrl_flag = BIT(ADRENO_SPTP_PC_CTRL) | BIT(ADRENO_PPD_CTRL) |
110 BIT(ADRENO_LM_CTRL) | BIT(ADRENO_HWCG_CTRL) |
111 BIT(ADRENO_THROTTLING_CTRL),
112 .profile.enabled = false,
113 .active_list = LIST_HEAD_INIT(device_3d0.active_list),
114 .active_list_lock = __SPIN_LOCK_UNLOCKED(device_3d0.active_list_lock),
Sushmita Susheelendrab1976682016-11-07 14:21:11 -0700115 .gpu_llc_slice_enable = true,
Sushmita Susheelendrad3756c02017-01-11 15:05:40 -0700116 .gpuhtw_llc_slice_enable = true,
Harshdeep Dhatta8ec51c2017-09-21 17:24:08 -0600117 .preempt = {
118 .preempt_level = 1,
119 .skipsaverestore = 1,
120 .usesgmem = 1,
121 },
Harshdeep Dhatteb66fc72017-10-25 14:47:29 -0600122 .priv = BIT(ADRENO_DEVICE_PREEMPTION_EXECUTION),
Shrenuj Bansala419c792016-10-20 14:05:11 -0700123};
124
125/* Ptr to array for the current set of fault detect registers */
126unsigned int *adreno_ft_regs;
127/* Total number of fault detect registers */
128unsigned int adreno_ft_regs_num;
129/* Ptr to array for the current fault detect registers values */
130unsigned int *adreno_ft_regs_val;
131/* Array of default fault detect registers */
132static unsigned int adreno_ft_regs_default[] = {
133 ADRENO_REG_RBBM_STATUS,
134 ADRENO_REG_CP_RB_RPTR,
135 ADRENO_REG_CP_IB1_BASE,
136 ADRENO_REG_CP_IB1_BUFSZ,
137 ADRENO_REG_CP_IB2_BASE,
138 ADRENO_REG_CP_IB2_BUFSZ
139};
140
141/* Nice level for the higher priority GPU start thread */
142int adreno_wake_nice = -7;
143
144 /* Number of milliseconds to stay active after a wake on touch */
145unsigned int adreno_wake_timeout = 100;
146
147/**
148 * adreno_readreg64() - Read a 64bit register by getting its offset from the
149 * offset array defined in gpudev node
150 * @adreno_dev: Pointer to the adreno device
151 * @lo: lower 32bit register enum that is to be read
152 * @hi: higher 32bit register enum that is to be read
153 * @val: 64 bit Register value read is placed here
154 */
155void adreno_readreg64(struct adreno_device *adreno_dev,
156 enum adreno_regs lo, enum adreno_regs hi, uint64_t *val)
157{
158 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
159 unsigned int val_lo = 0, val_hi = 0;
160 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
161
162 if (adreno_checkreg_off(adreno_dev, lo))
163 kgsl_regread(device, gpudev->reg_offsets->offsets[lo], &val_lo);
164 if (adreno_checkreg_off(adreno_dev, hi))
165 kgsl_regread(device, gpudev->reg_offsets->offsets[hi], &val_hi);
166
167 *val = (val_lo | ((uint64_t)val_hi << 32));
168}
169
170/**
171 * adreno_writereg64() - Write a 64bit register by getting its offset from the
172 * offset array defined in gpudev node
173 * @adreno_dev: Pointer to the adreno device
174 * @lo: lower 32bit register enum that is to be written
175 * @hi: higher 32bit register enum that is to be written
176 * @val: 64 bit value to write
177 */
178void adreno_writereg64(struct adreno_device *adreno_dev,
179 enum adreno_regs lo, enum adreno_regs hi, uint64_t val)
180{
181 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
182
183 if (adreno_checkreg_off(adreno_dev, lo))
184 kgsl_regwrite(KGSL_DEVICE(adreno_dev),
185 gpudev->reg_offsets->offsets[lo], lower_32_bits(val));
186 if (adreno_checkreg_off(adreno_dev, hi))
187 kgsl_regwrite(KGSL_DEVICE(adreno_dev),
188 gpudev->reg_offsets->offsets[hi], upper_32_bits(val));
189}
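/*
 * Illustrative usage (editor's sketch, not part of the driver): reading and
 * writing a 64-bit register pair through the enum-based offset table. The
 * register enums and KGSL_IOMMU_SECURE_BASE are the same names used later in
 * _set_secvid(); the snippet assumes a powered-on GPU.
 *
 *	uint64_t base;
 *
 *	adreno_readreg64(adreno_dev,
 *			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
 *			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI, &base);
 *
 *	adreno_writereg64(adreno_dev,
 *			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
 *			ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
 *			KGSL_IOMMU_SECURE_BASE);
 */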
190
191/**
192 * adreno_get_rptr() - Get the current ringbuffer read pointer
193 * @rb: Pointer to the ringbuffer to query
194 *
195 * Get the latest rptr
196 */
197unsigned int adreno_get_rptr(struct adreno_ringbuffer *rb)
198{
199 struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
200 unsigned int rptr = 0;
201
202 if (adreno_is_a3xx(adreno_dev))
203 adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
204 &rptr);
205 else {
206 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
207
208 kgsl_sharedmem_readl(&device->scratch, &rptr,
209 SCRATCH_RPTR_OFFSET(rb->id));
210 }
211
212 return rptr;
213}
214
215/**
216 * adreno_of_read_property() - Adreno read property
217 * @node: Device node
218 *
219 * Read the u32 property @prop into @ptr and log an error if it is missing.
220 */
221static inline int adreno_of_read_property(struct device_node *node,
222 const char *prop, unsigned int *ptr)
223{
224 int ret = of_property_read_u32(node, prop, ptr);
225
226 if (ret)
227 KGSL_CORE_ERR("Unable to read '%s'\n", prop);
228 return ret;
229}
230
231static void __iomem *efuse_base;
232static size_t efuse_len;
233
234int adreno_efuse_map(struct adreno_device *adreno_dev)
235{
236 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
237 struct resource *res;
238
239 if (efuse_base != NULL)
240 return 0;
241
242 res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
243 "qfprom_memory");
244
245 if (res == NULL)
246 return -ENODEV;
247
248 efuse_base = ioremap(res->start, resource_size(res));
249 if (efuse_base == NULL)
250 return -ENODEV;
251
252 efuse_len = resource_size(res);
253 return 0;
254}
255
256void adreno_efuse_unmap(struct adreno_device *adreno_dev)
257{
258 if (efuse_base != NULL) {
259 iounmap(efuse_base);
260 efuse_base = NULL;
261 efuse_len = 0;
262 }
263}
264
265int adreno_efuse_read_u32(struct adreno_device *adreno_dev, unsigned int offset,
266 unsigned int *val)
267{
268 if (efuse_base == NULL)
269 return -ENODEV;
270
271 if (offset >= efuse_len)
272 return -ERANGE;
273
274 if (val != NULL) {
275 *val = readl_relaxed(efuse_base + offset);
276 /* Make sure memory is updated before returning */
277 rmb();
278 }
279
280 return 0;
281}
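/*
 * Illustrative usage (editor's sketch, not part of the driver): the efuse
 * helpers are intended to be called as a map/read/unmap sequence, typically
 * from a target-specific setup path. The offset 0x0 and the use of speed_bin
 * below are placeholders, not real fuse locations.
 *
 *	unsigned int fuse = 0;
 *
 *	if (adreno_efuse_map(adreno_dev) == 0) {
 *		if (adreno_efuse_read_u32(adreno_dev, 0x0, &fuse) == 0)
 *			adreno_dev->speed_bin = fuse;
 *		adreno_efuse_unmap(adreno_dev);
 *	}
 */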
282
283static int _get_counter(struct adreno_device *adreno_dev,
284 int group, int countable, unsigned int *lo,
285 unsigned int *hi)
286{
287 int ret = 0;
288
289 if (*lo == 0) {
290
291 ret = adreno_perfcounter_get(adreno_dev, group, countable,
292 lo, hi, PERFCOUNTER_FLAG_KERNEL);
293
294 if (ret) {
295 KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
296 "Unable to allocate fault detect performance counter %d/%d\n",
297 group, countable);
298 KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
299 "GPU fault detect will be less reliable\n");
300 }
301 }
302
303 return ret;
304}
305
306static inline void _put_counter(struct adreno_device *adreno_dev,
307 int group, int countable, unsigned int *lo,
308 unsigned int *hi)
309{
310 if (*lo != 0)
311 adreno_perfcounter_put(adreno_dev, group, countable,
312 PERFCOUNTER_FLAG_KERNEL);
313
314 *lo = 0;
315 *hi = 0;
316}
317
318/**
319 * adreno_fault_detect_start() - Allocate performance counters
320 * used for fast fault detection
321 * @adreno_dev: Pointer to an adreno_device structure
322 *
323 * Allocate the series of performance counters that should be periodically
324 * checked to verify that the GPU is still moving
325 */
326void adreno_fault_detect_start(struct adreno_device *adreno_dev)
327{
328 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
329 unsigned int i, j = ARRAY_SIZE(adreno_ft_regs_default);
330
331 if (!test_bit(ADRENO_DEVICE_SOFT_FAULT_DETECT, &adreno_dev->priv))
332 return;
333
334 if (adreno_dev->fast_hang_detect == 1)
335 return;
336
337 for (i = 0; i < gpudev->ft_perf_counters_count; i++) {
338 _get_counter(adreno_dev, gpudev->ft_perf_counters[i].counter,
339 gpudev->ft_perf_counters[i].countable,
340 &adreno_ft_regs[j + (i * 2)],
341 &adreno_ft_regs[j + ((i * 2) + 1)]);
342 }
343
344 adreno_dev->fast_hang_detect = 1;
345}
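/*
 * Layout sketch for adreno_ft_regs (illustrative only, assuming two
 * fault-detect perfcounters): the first ARRAY_SIZE(adreno_ft_regs_default)
 * entries hold the default registers (RBBM_STATUS, CP_RB_RPTR, ...), and
 * each fault-detect perfcounter i then contributes a lo/hi pair:
 *
 *	[0..5]	default registers
 *	[6]	counter 0 lo	[7]	counter 0 hi
 *	[8]	counter 1 lo	[9]	counter 1 hi
 *
 * i.e. index j + (i * 2) is the lo half and j + (i * 2) + 1 the hi half,
 * where j is the number of default registers.
 */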
346
347/**
348 * adreno_fault_detect_stop() - Release performance counters
349 * used for fast fault detection
350 * @adreno_dev: Pointer to an adreno_device structure
351 *
352 * Release the counters allocated in adreno_fault_detect_start
353 */
354void adreno_fault_detect_stop(struct adreno_device *adreno_dev)
355{
356 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
357 unsigned int i, j = ARRAY_SIZE(adreno_ft_regs_default);
358
359 if (!test_bit(ADRENO_DEVICE_SOFT_FAULT_DETECT, &adreno_dev->priv))
360 return;
361
362 if (!adreno_dev->fast_hang_detect)
363 return;
364
365 for (i = 0; i < gpudev->ft_perf_counters_count; i++) {
366 _put_counter(adreno_dev, gpudev->ft_perf_counters[i].counter,
367 gpudev->ft_perf_counters[i].countable,
368 &adreno_ft_regs[j + (i * 2)],
369 &adreno_ft_regs[j + ((i * 2) + 1)]);
370
371 }
372
373 adreno_dev->fast_hang_detect = 0;
374}
375
376/*
377 * A workqueue callback responsible for actually turning on the GPU after a
378 * touch event. kgsl_pwrctrl_change_state(ACTIVE) is used without any
379 * active_count protection to avoid the need to maintain state. Either
380 * somebody will start using the GPU or the idle timer will fire and put the
381 * GPU back into slumber.
382 */
383static void adreno_input_work(struct work_struct *work)
384{
385 struct adreno_device *adreno_dev = container_of(work,
386 struct adreno_device, input_work);
387 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
388
389 mutex_lock(&device->mutex);
390
391 device->flags |= KGSL_FLAG_WAKE_ON_TOUCH;
392
393 /*
394 * Don't schedule adreno_start in a high priority workqueue; we are
 395 * already in a workqueue, which should be sufficient
396 */
397 kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
398
399 /*
400 * When waking up from a touch event we want to stay active long enough
401 * for the user to send a draw command. The default idle timer timeout
402 * is shorter than we want so go ahead and push the idle timer out
403 * further for this special case
404 */
405 mod_timer(&device->idle_timer,
406 jiffies + msecs_to_jiffies(adreno_wake_timeout));
407 mutex_unlock(&device->mutex);
408}
409
410/*
411 * Process input events and schedule work if needed. At this point we are only
412 * interested in grokking EV_ABS touchscreen events
413 */
414static void adreno_input_event(struct input_handle *handle, unsigned int type,
415 unsigned int code, int value)
416{
417 struct kgsl_device *device = handle->handler->private;
418 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
419
420 /* Only consider EV_ABS (touch) events */
421 if (type != EV_ABS)
422 return;
423
424 /*
425 * Don't do anything if nothing has been rendered since the last time
 426 * we were here
427 */
428
429 if (device->flags & KGSL_FLAG_WAKE_ON_TOUCH)
430 return;
431
432 /*
433 * If the device is in nap, kick the idle timer to make sure that we
434 * don't go into slumber before the first render. If the device is
435 * already in slumber, schedule the wake.
436 */
437
438 if (device->state == KGSL_STATE_NAP) {
439 /*
440 * Set the wake on touch bit to keep from coming back here and
441 * keeping the device in nap without rendering
442 */
443
444 device->flags |= KGSL_FLAG_WAKE_ON_TOUCH;
445
446 mod_timer(&device->idle_timer,
447 jiffies + device->pwrctrl.interval_timeout);
448 } else if (device->state == KGSL_STATE_SLUMBER) {
449 schedule_work(&adreno_dev->input_work);
450 }
451}
452
453#ifdef CONFIG_INPUT
454static int adreno_input_connect(struct input_handler *handler,
455 struct input_dev *dev, const struct input_device_id *id)
456{
457 struct input_handle *handle;
458 int ret;
459
460 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
461 if (handle == NULL)
462 return -ENOMEM;
463
464 handle->dev = dev;
465 handle->handler = handler;
466 handle->name = handler->name;
467
468 ret = input_register_handle(handle);
469 if (ret) {
470 kfree(handle);
471 return ret;
472 }
473
474 ret = input_open_device(handle);
475 if (ret) {
476 input_unregister_handle(handle);
477 kfree(handle);
478 }
479
480 return ret;
481}
482
483static void adreno_input_disconnect(struct input_handle *handle)
484{
485 input_close_device(handle);
486 input_unregister_handle(handle);
487 kfree(handle);
488}
489#else
490static int adreno_input_connect(struct input_handler *handler,
491 struct input_dev *dev, const struct input_device_id *id)
492{
493 return 0;
494}
495static void adreno_input_disconnect(struct input_handle *handle) {}
496#endif
497
498/*
499 * We are only interested in EV_ABS events so only register handlers for those
500 * input devices that have EV_ABS events
501 */
502static const struct input_device_id adreno_input_ids[] = {
503 {
504 .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
505 .evbit = { BIT_MASK(EV_ABS) },
506 /* assumption: MT_.._X & MT_.._Y are in the same long */
507 .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
508 BIT_MASK(ABS_MT_POSITION_X) |
509 BIT_MASK(ABS_MT_POSITION_Y) },
510 },
511 { },
512};
513
514static struct input_handler adreno_input_handler = {
515 .event = adreno_input_event,
516 .connect = adreno_input_connect,
517 .disconnect = adreno_input_disconnect,
518 .name = "kgsl",
519 .id_table = adreno_input_ids,
520};
521
Shrenuj Bansala419c792016-10-20 14:05:11 -0700522/*
523 * _soft_reset() - Soft reset GPU
524 * @adreno_dev: Pointer to adreno device
525 *
526 * Soft reset the GPU by doing an AHB write of value 1 to the RBBM_SW_RESET
 527 * register. This is used when we want to reset the GPU without
 528 * turning off the GFX power rail. The reset, when asserted, resets
529 * all the HW logic, restores GPU registers to default state and
530 * flushes out pending VBIF transactions.
531 */
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -0700532static int _soft_reset(struct adreno_device *adreno_dev)
Shrenuj Bansala419c792016-10-20 14:05:11 -0700533{
534 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
535 unsigned int reg;
536
537 /*
538 * On a530 v1 RBBM cannot be reset in soft reset.
539 * Reset all blocks except RBBM for a530v1.
540 */
541 if (adreno_is_a530v1(adreno_dev)) {
542 adreno_writereg(adreno_dev, ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD,
543 0xFFDFFC0);
544 adreno_writereg(adreno_dev, ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
545 0x1FFFFFFF);
546 } else {
547
548 adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 1);
549 /*
550 * Do a dummy read to get a brief read cycle delay for the
551 * reset to take effect
552 */
553 adreno_readreg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, &reg);
554 adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 0);
555 }
556
557 /* The SP/TP regulator gets turned off after a soft reset */
558
559 if (gpudev->regulator_enable)
560 gpudev->regulator_enable(adreno_dev);
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -0700561
562 return 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700563}
564
Kyle Piefer8fe58df2017-09-12 09:19:28 -0700565/**
566 * adreno_irqctrl() - Enables/disables the RBBM interrupt mask
567 * @adreno_dev: Pointer to an adreno_device
568 * @state: 1 to program the RBBM interrupt mask (enable IRQs), 0 to clear it (mask all IRQs)
569 * Power: The caller of this function must make sure to use OOBs
570 * so that we know that the GPU is powered on
571 */
Shrenuj Bansala419c792016-10-20 14:05:11 -0700572void adreno_irqctrl(struct adreno_device *adreno_dev, int state)
573{
574 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
575 unsigned int mask = state ? gpudev->irq->mask : 0;
576
577 adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_0_MASK, mask);
578}
579
580/*
581 * adreno_hang_int_callback() - Isr for fatal interrupts that hang GPU
582 * adreno_hang_int_callback() - ISR for fatal interrupts that hang the GPU
583 * @bit: Interrupt bit
584 */
585void adreno_hang_int_callback(struct adreno_device *adreno_dev, int bit)
586{
587 KGSL_DRV_CRIT_RATELIMIT(KGSL_DEVICE(adreno_dev),
588 "MISC: GPU hang detected\n");
589 adreno_irqctrl(adreno_dev, 0);
590
591 /* Trigger a fault in the dispatcher - this will effect a restart */
592 adreno_set_gpu_fault(adreno_dev, ADRENO_HARD_FAULT);
593 adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
594}
595
596/*
597 * adreno_cp_callback() - CP interrupt handler
598 * @adreno_dev: Adreno device pointer
599 * @bit: Interrupt bit
 600 *
 601 * Handle the CP interrupt generated by the GPU.
602 */
603void adreno_cp_callback(struct adreno_device *adreno_dev, int bit)
604{
605 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
606
607 adreno_dispatcher_schedule(device);
608}
609
610static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
611{
612 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
613 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
614 struct adreno_irq *irq_params = gpudev->irq;
615 irqreturn_t ret = IRQ_NONE;
Kyle Piefer5e1b78bd2017-10-19 13:22:10 -0700616 unsigned int status = 0, fence = 0, fence_retries = 0, tmp, int_bit;
Harshdeep Dhatte8046962017-11-10 15:45:24 -0700617 unsigned int status_retries = 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700618 int i;
619
Deepak Kumar273c5712017-01-03 21:49:03 +0530620 atomic_inc(&adreno_dev->pending_irq_refcnt);
621 /* Ensure this increment is done before the IRQ status is updated */
622 smp_mb__after_atomic();
623
Carter Cooperdf7ba702017-03-20 11:28:04 -0600624 /*
625 * On A6xx, the GPU can power down once the INT_0_STATUS is read
626 * below. But there still might be some register reads required
627 * so force the GMU/GPU into KEEPALIVE mode until done with the ISR.
628 */
629 if (gpudev->gpu_keepalive)
630 gpudev->gpu_keepalive(adreno_dev, true);
631
Kyle Pieferda0fa542017-08-04 13:39:40 -0700632 /*
633 * If the AHB fence is not in ALLOW mode when we receive an RBBM
Kyle Piefer5e1b78bd2017-10-19 13:22:10 -0700634 * interrupt, something went wrong. This means that we cannot proceed
635 * since the IRQ status and clear registers are not accessible.
636 * This is usually harmless because the GMU will abort power collapse
637 * and change the fence back to ALLOW. Poll so that this can happen.
Kyle Pieferda0fa542017-08-04 13:39:40 -0700638 */
Kyle Piefer5e1b78bd2017-10-19 13:22:10 -0700639 if (kgsl_gmu_isenabled(device)) {
640 do {
641 adreno_readreg(adreno_dev,
642 ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
643 &fence);
644
645 if (fence_retries == FENCE_RETRY_MAX) {
646 KGSL_DRV_CRIT_RATELIMIT(device,
647 "AHB fence stuck in ISR\n");
648 return ret;
649 }
650 fence_retries++;
651 } while (fence != 0);
Kyle Pieferda0fa542017-08-04 13:39:40 -0700652 }
653
Shrenuj Bansala419c792016-10-20 14:05:11 -0700654 adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
655
656 /*
Harshdeep Dhatte8046962017-11-10 15:45:24 -0700657 * Read status again to make sure the bits aren't transitory.
658 * Transitory bits are spurious interrupts that are seen while
 659 * preemption is ongoing. Empirical experiments have shown that the
 660 * transitory bits are a timing artifact and that they go away in the
 661 * small time window between two or three consecutive reads. If they
 662 * don't go away, log the message and return.
663 */
664 while (status_retries < STATUS_RETRY_MAX) {
665 unsigned int new_status;
666
667 adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS,
668 &new_status);
669
670 if (status == new_status)
671 break;
672
673 status = new_status;
674 status_retries++;
675 }
676
677 if (status_retries == STATUS_RETRY_MAX) {
678 KGSL_DRV_CRIT_RATELIMIT(device, "STATUS bits are not stable\n");
679 return ret;
680 }
681
682 /*
Shrenuj Bansala419c792016-10-20 14:05:11 -0700683 * Clear all the interrupt bits except ADRENO_INT_RBBM_AHB_ERROR: even
 684 * if we cleared it here, it would stay high until it is cleared in
 685 * its respective handler, and in the meantime the interrupt handler
 686 * would fire again.
687 */
688 int_bit = ADRENO_INT_BIT(adreno_dev, ADRENO_INT_RBBM_AHB_ERROR);
689 adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
690 status & ~int_bit);
691
692 /* Loop through all set interrupts and call respective handlers */
693 for (tmp = status; tmp != 0;) {
694 i = fls(tmp) - 1;
695
696 if (irq_params->funcs[i].func != NULL) {
697 if (irq_params->mask & BIT(i))
698 irq_params->funcs[i].func(adreno_dev, i);
699 } else
700 KGSL_DRV_CRIT_RATELIMIT(device,
701 "Unhandled interrupt bit %x\n", i);
702
703 ret = IRQ_HANDLED;
704
705 tmp &= ~BIT(i);
706 }
707
708 gpudev->irq_trace(adreno_dev, status);
709
710 /*
711 * Clear ADRENO_INT_RBBM_AHB_ERROR bit after this interrupt has been
712 * cleared in its respective handler
713 */
714 if (status & int_bit)
715 adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
716 int_bit);
717
Carter Cooperdf7ba702017-03-20 11:28:04 -0600718 /* Turn off the KEEPALIVE vote from earlier unless hard fault set */
719 if (gpudev->gpu_keepalive) {
720 /* If hard fault, then let snapshot turn off the keepalive */
721 if (!(adreno_gpu_fault(adreno_dev) & ADRENO_HARD_FAULT))
722 gpudev->gpu_keepalive(adreno_dev, false);
723 }
724
Deepak Kumar273c5712017-01-03 21:49:03 +0530725 /* Make sure the regwrites are done before the decrement */
726 smp_mb__before_atomic();
727 atomic_dec(&adreno_dev->pending_irq_refcnt);
728 /* Ensure other CPUs see the decrement */
729 smp_mb__after_atomic();
730
Shrenuj Bansala419c792016-10-20 14:05:11 -0700731 return ret;
732
733}
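/*
 * Worked example for the dispatch loop above (illustrative): with
 * status == 0x12 (bits 1 and 4 set), fls(tmp) - 1 evaluates to 4 on the
 * first pass and 1 on the second, so handlers in irq_params->funcs[] are
 * called from the highest set bit downwards and each serviced bit is
 * cleared from tmp until none remain.
 */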
734
735static inline bool _rev_match(unsigned int id, unsigned int entry)
736{
737 return (entry == ANY_ID || entry == id);
738}
739
740static inline const struct adreno_gpu_core *_get_gpu_core(unsigned int chipid)
741{
742 unsigned int core = ADRENO_CHIPID_CORE(chipid);
743 unsigned int major = ADRENO_CHIPID_MAJOR(chipid);
744 unsigned int minor = ADRENO_CHIPID_MINOR(chipid);
745 unsigned int patchid = ADRENO_CHIPID_PATCH(chipid);
746 int i;
747
748 for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) {
749 if (core == adreno_gpulist[i].core &&
750 _rev_match(major, adreno_gpulist[i].major) &&
751 _rev_match(minor, adreno_gpulist[i].minor) &&
752 _rev_match(patchid, adreno_gpulist[i].patchid))
753 return &adreno_gpulist[i];
754 }
755
756 return NULL;
757}
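/*
 * Worked example (illustrative, assuming the usual KGSL chipid packing of
 * one byte each for core:major:minor:patchid): a chipid of 0x05030000
 * decomposes to core 5, major 3, minor 0, patchid 0, and _get_gpu_core()
 * returns the adreno-gpulist.h entry whose fields match, with ANY_ID
 * entries in the list matching any value for that field.
 */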
758
759static void
760adreno_identify_gpu(struct adreno_device *adreno_dev)
761{
762 const struct adreno_reg_offsets *reg_offsets;
763 struct adreno_gpudev *gpudev;
764 int i;
765
766 if (kgsl_property_read_u32(KGSL_DEVICE(adreno_dev), "qcom,chipid",
767 &adreno_dev->chipid))
768 KGSL_DRV_FATAL(KGSL_DEVICE(adreno_dev),
769 "No GPU chip ID was specified\n");
770
771 adreno_dev->gpucore = _get_gpu_core(adreno_dev->chipid);
772
773 if (adreno_dev->gpucore == NULL)
774 KGSL_DRV_FATAL(KGSL_DEVICE(adreno_dev),
775 "Unknown GPU chip ID %8.8X\n", adreno_dev->chipid);
776
777 /*
778 * The gmem size might be dynamic when ocmem is involved so copy it out
779 * of the gpu device
780 */
781
782 adreno_dev->gmem_size = adreno_dev->gpucore->gmem_size;
783
784 /*
785 * Initialize uninitialized GPU register offsets; only needs to be done once.
 786 * Set all offsets that are not initialized to ADRENO_REG_UNUSED
787 */
788
789 gpudev = ADRENO_GPU_DEVICE(adreno_dev);
790 reg_offsets = gpudev->reg_offsets;
791
792 for (i = 0; i < ADRENO_REG_REGISTER_MAX; i++) {
793 if (reg_offsets->offset_0 != i && !reg_offsets->offsets[i])
794 reg_offsets->offsets[i] = ADRENO_REG_UNUSED;
795 }
796
797 /* Do target specific identification */
798 if (gpudev->platform_setup != NULL)
799 gpudev->platform_setup(adreno_dev);
800}
801
802static const struct platform_device_id adreno_id_table[] = {
803 { DEVICE_3D0_NAME, (unsigned long) &device_3d0, },
804 {},
805};
806
807MODULE_DEVICE_TABLE(platform, adreno_id_table);
808
809static const struct of_device_id adreno_match_table[] = {
810 { .compatible = "qcom,kgsl-3d0", .data = &device_3d0 },
811 {}
812};
813
814static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
815 struct device_node *node)
816{
817 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
818 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
819 struct device_node *child;
820
821 pwr->num_pwrlevels = 0;
822
823 for_each_child_of_node(node, child) {
824 unsigned int index;
825 struct kgsl_pwrlevel *level;
826
827 if (adreno_of_read_property(child, "reg", &index))
828 return -EINVAL;
829
830 if (index >= KGSL_MAX_PWRLEVELS) {
831 KGSL_CORE_ERR("Pwrlevel index %d is out of range\n",
832 index);
833 continue;
834 }
835
836 if (index >= pwr->num_pwrlevels)
837 pwr->num_pwrlevels = index + 1;
838
839 level = &pwr->pwrlevels[index];
840
841 if (adreno_of_read_property(child, "qcom,gpu-freq",
842 &level->gpu_freq))
843 return -EINVAL;
844
845 if (adreno_of_read_property(child, "qcom,bus-freq",
846 &level->bus_freq))
847 return -EINVAL;
848
849 if (of_property_read_u32(child, "qcom,bus-min",
850 &level->bus_min))
851 level->bus_min = level->bus_freq;
852
853 if (of_property_read_u32(child, "qcom,bus-max",
854 &level->bus_max))
855 level->bus_max = level->bus_freq;
856 }
857
858 return 0;
859}
860
861
862static void adreno_of_get_initial_pwrlevel(struct adreno_device *adreno_dev,
863 struct device_node *node)
864{
865 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
866 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
867 int init_level = 1;
868
869 of_property_read_u32(node, "qcom,initial-pwrlevel", &init_level);
870
871 if (init_level < 0 || init_level > pwr->num_pwrlevels)
872 init_level = 1;
873
874 pwr->active_pwrlevel = init_level;
875 pwr->default_pwrlevel = init_level;
876}
877
878static int adreno_of_get_legacy_pwrlevels(struct adreno_device *adreno_dev,
879 struct device_node *parent)
880{
881 struct device_node *node;
882 int ret;
883
884 node = of_find_node_by_name(parent, "qcom,gpu-pwrlevels");
885
886 if (node == NULL) {
887 KGSL_CORE_ERR("Unable to find 'qcom,gpu-pwrlevels'\n");
888 return -EINVAL;
889 }
890
891 ret = adreno_of_parse_pwrlevels(adreno_dev, node);
892 if (ret == 0)
893 adreno_of_get_initial_pwrlevel(adreno_dev, parent);
894 return ret;
895}
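/*
 * Illustrative device-tree fragment for the legacy power level format
 * parsed above (editor's sketch; the frequencies and bus votes are
 * placeholders, not values for any real target):
 *
 *	qcom,gpu-pwrlevels {
 *		qcom,gpu-pwrlevel@0 {
 *			reg = <0>;
 *			qcom,gpu-freq = <600000000>;
 *			qcom,bus-freq = <11>;
 *			qcom,bus-min = <10>;
 *			qcom,bus-max = <12>;
 *		};
 *	};
 *
 * qcom,bus-min and qcom,bus-max are optional and default to qcom,bus-freq
 * when absent.
 */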
896
897static int adreno_of_get_pwrlevels(struct adreno_device *adreno_dev,
898 struct device_node *parent)
899{
900 struct device_node *node, *child;
Hareesh Gundu7d536522017-08-24 20:20:56 +0530901 unsigned int bin = 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700902
903 node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");
904 if (node == NULL)
905 return adreno_of_get_legacy_pwrlevels(adreno_dev, parent);
906
907 for_each_child_of_node(node, child) {
Shrenuj Bansala419c792016-10-20 14:05:11 -0700908
909 if (of_property_read_u32(child, "qcom,speed-bin", &bin))
910 continue;
911
912 if (bin == adreno_dev->speed_bin) {
913 int ret;
914
915 ret = adreno_of_parse_pwrlevels(adreno_dev, child);
916 if (ret == 0)
917 adreno_of_get_initial_pwrlevel(adreno_dev,
918 child);
919 return ret;
920 }
921 }
922
Hareesh Gundu7d536522017-08-24 20:20:56 +0530923 KGSL_CORE_ERR("GPU speed_bin:%d mismatch for efused bin:%d\n",
924 adreno_dev->speed_bin, bin);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700925 return -ENODEV;
926}
927
928static inline struct adreno_device *adreno_get_dev(struct platform_device *pdev)
929{
930 const struct of_device_id *of_id =
931 of_match_device(adreno_match_table, &pdev->dev);
932
933 return of_id ? (struct adreno_device *) of_id->data : NULL;
934}
935
936static struct {
937 unsigned int quirk;
938 const char *prop;
939} adreno_quirks[] = {
940 { ADRENO_QUIRK_TWO_PASS_USE_WFI, "qcom,gpu-quirk-two-pass-use-wfi" },
941 { ADRENO_QUIRK_IOMMU_SYNC, "qcom,gpu-quirk-iommu-sync" },
942 { ADRENO_QUIRK_CRITICAL_PACKETS, "qcom,gpu-quirk-critical-packets" },
943 { ADRENO_QUIRK_FAULT_DETECT_MASK, "qcom,gpu-quirk-fault-detect-mask" },
944 { ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING,
945 "qcom,gpu-quirk-dp2clockgating-disable" },
946 { ADRENO_QUIRK_DISABLE_LMLOADKILL,
947 "qcom,gpu-quirk-lmloadkill-disable" },
Kyle Pieferb1027b02017-02-10 13:58:58 -0800948 { ADRENO_QUIRK_HFI_USE_REG, "qcom,gpu-quirk-hfi-use-reg" },
Carter Cooper6682ead2017-09-28 14:52:53 -0600949 { ADRENO_QUIRK_SECVID_SET_ONCE, "qcom,gpu-quirk-secvid-set-once" },
Shrenuj Bansala419c792016-10-20 14:05:11 -0700950};
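/*
 * Illustrative sketch (not part of the driver): a target's GPU device-tree
 * node enables a quirk simply by declaring the matching boolean property,
 * for example:
 *
 *	qcom,gpu-quirk-secvid-set-once;
 *
 * adreno_of_get_power() below ORs the corresponding bit into
 * adreno_dev->quirks, and code paths test it with the ADRENO_QUIRK()
 * macro, as _set_secvid() does later in this file.
 */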
951
952static int adreno_of_get_power(struct adreno_device *adreno_dev,
953 struct platform_device *pdev)
954{
955 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
956 struct device_node *node = pdev->dev.of_node;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800957 struct resource *res;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700958 int i;
959 unsigned int timeout;
960
961 if (of_property_read_string(node, "label", &pdev->name)) {
962 KGSL_CORE_ERR("Unable to read 'label'\n");
963 return -EINVAL;
964 }
965
966 if (adreno_of_read_property(node, "qcom,id", &pdev->id))
967 return -EINVAL;
968
969 /* Set up quirks and other boolean options */
970 for (i = 0; i < ARRAY_SIZE(adreno_quirks); i++) {
971 if (of_property_read_bool(node, adreno_quirks[i].prop))
972 adreno_dev->quirks |= adreno_quirks[i].quirk;
973 }
974
Kyle Pieferb1027b02017-02-10 13:58:58 -0800975 /* Get starting physical address of device registers */
976 res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
977 device->iomemname);
978 if (res == NULL) {
979 KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
980 return -EINVAL;
981 }
982 if (res->start == 0 || resource_size(res) == 0) {
983 KGSL_DRV_ERR(device, "dev %d invalid register region\n",
984 device->id);
985 return -EINVAL;
986 }
987
988 device->reg_phys = res->start;
989 device->reg_len = resource_size(res);
990
Shrenuj Bansala419c792016-10-20 14:05:11 -0700991 if (adreno_of_get_pwrlevels(adreno_dev, node))
992 return -EINVAL;
993
994 /* get pm-qos-active-latency, set it to default if not found */
995 if (of_property_read_u32(node, "qcom,pm-qos-active-latency",
996 &device->pwrctrl.pm_qos_active_latency))
997 device->pwrctrl.pm_qos_active_latency = 501;
998
999 /* get pm-qos-cpu-mask-latency, set it to default if not found */
1000 if (of_property_read_u32(node, "qcom,l2pc-cpu-mask-latency",
1001 &device->pwrctrl.pm_qos_cpu_mask_latency))
1002 device->pwrctrl.pm_qos_cpu_mask_latency = 501;
1003
1004 /* get pm-qos-wakeup-latency, set it to default if not found */
1005 if (of_property_read_u32(node, "qcom,pm-qos-wakeup-latency",
1006 &device->pwrctrl.pm_qos_wakeup_latency))
1007 device->pwrctrl.pm_qos_wakeup_latency = 101;
1008
1009 if (of_property_read_u32(node, "qcom,idle-timeout", &timeout))
1010 timeout = 80;
1011
1012 device->pwrctrl.interval_timeout = msecs_to_jiffies(timeout);
1013
1014 device->pwrctrl.bus_control = of_property_read_bool(node,
1015 "qcom,bus-control");
1016
Hareesh Gundu5648ead2017-07-28 16:48:00 +05301017 device->pwrctrl.input_disable = of_property_read_bool(node,
1018 "qcom,disable-wake-on-touch");
1019
Shrenuj Bansala419c792016-10-20 14:05:11 -07001020 return 0;
1021}
1022
1023#ifdef CONFIG_QCOM_OCMEM
1024static int
1025adreno_ocmem_malloc(struct adreno_device *adreno_dev)
1026{
1027 if (!ADRENO_FEATURE(adreno_dev, ADRENO_USES_OCMEM))
1028 return 0;
1029
1030 if (adreno_dev->ocmem_hdl == NULL) {
1031 adreno_dev->ocmem_hdl =
1032 ocmem_allocate(OCMEM_GRAPHICS, adreno_dev->gmem_size);
1033 if (IS_ERR_OR_NULL(adreno_dev->ocmem_hdl)) {
1034 adreno_dev->ocmem_hdl = NULL;
1035 return -ENOMEM;
1036 }
1037
1038 adreno_dev->gmem_size = adreno_dev->ocmem_hdl->len;
1039 adreno_dev->gmem_base = adreno_dev->ocmem_hdl->addr;
1040 }
1041
1042 return 0;
1043}
1044
1045static void
1046adreno_ocmem_free(struct adreno_device *adreno_dev)
1047{
1048 if (adreno_dev->ocmem_hdl != NULL) {
1049 ocmem_free(OCMEM_GRAPHICS, adreno_dev->ocmem_hdl);
1050 adreno_dev->ocmem_hdl = NULL;
1051 }
1052}
1053#else
1054static int
1055adreno_ocmem_malloc(struct adreno_device *adreno_dev)
1056{
1057 return 0;
1058}
1059
1060static void
1061adreno_ocmem_free(struct adreno_device *adreno_dev)
1062{
1063}
1064#endif
1065
Lynus Vaz9ed8cf92017-09-21 21:55:34 +05301066static void adreno_cx_dbgc_probe(struct kgsl_device *device)
1067{
1068 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1069 struct resource *res;
1070
1071 res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
1072 "kgsl_3d0_cx_dbgc_memory");
1073
1074 if (res == NULL)
1075 return;
1076
1077 adreno_dev->cx_dbgc_base = res->start - device->reg_phys;
1078 adreno_dev->cx_dbgc_len = resource_size(res);
1079 adreno_dev->cx_dbgc_virt = devm_ioremap(device->dev,
1080 device->reg_phys +
1081 adreno_dev->cx_dbgc_base,
1082 adreno_dev->cx_dbgc_len);
1083
1084 if (adreno_dev->cx_dbgc_virt == NULL)
1085 KGSL_DRV_WARN(device, "cx_dbgc ioremap failed\n");
1086}
1087
Shrenuj Bansala419c792016-10-20 14:05:11 -07001088static int adreno_probe(struct platform_device *pdev)
1089{
1090 struct kgsl_device *device;
1091 struct adreno_device *adreno_dev;
1092 int status;
1093
1094 adreno_dev = adreno_get_dev(pdev);
1095
1096 if (adreno_dev == NULL) {
1097 pr_err("adreno: qcom,kgsl-3d0 does not exist in the device tree");
1098 return -ENODEV;
1099 }
1100
1101 device = KGSL_DEVICE(adreno_dev);
1102 device->pdev = pdev;
1103
1104 /* Get the chip ID from the DT and set up target specific parameters */
1105 adreno_identify_gpu(adreno_dev);
1106
1107 status = adreno_of_get_power(adreno_dev, pdev);
1108 if (status) {
1109 device->pdev = NULL;
1110 return status;
1111 }
1112
1113 /*
Kyle Pieferb1027b02017-02-10 13:58:58 -08001114 * Probe/init the GMU after the initial GPU power probe;
 1115 * a later part of the GPU power probe in platform_probe
 1116 * needs the GMU initialized.
1117 */
1118 status = gmu_probe(device);
1119 if (status != 0 && status != -ENXIO) {
1120 device->pdev = NULL;
1121 return status;
1122 }
1123
1124 /*
Shrenuj Bansala419c792016-10-20 14:05:11 -07001125 * The SMMU APIs use unsigned long for virtual addresses which means
1126 * that we cannot use 64 bit virtual addresses on a 32 bit kernel even
1127 * though the hardware and the rest of the KGSL driver supports it.
1128 */
1129 if (adreno_support_64bit(adreno_dev))
1130 device->mmu.features |= KGSL_MMU_64BIT;
1131
1132 status = kgsl_device_platform_probe(device);
1133 if (status) {
1134 device->pdev = NULL;
1135 return status;
1136 }
1137
Lynus Vaz9ed8cf92017-09-21 21:55:34 +05301138 /* Probe for the optional CX_DBGC block */
1139 adreno_cx_dbgc_probe(device);
1140
Shrenuj Bansala419c792016-10-20 14:05:11 -07001141 /*
1142 * qcom,iommu-secure-id is used to identify MMUs that can handle secure
1143 * content but that is only part of the story - the GPU also has to be
1144 * able to handle secure content. Unfortunately in a classic catch-22
1145 * we cannot identify the GPU until after the DT is parsed. tl;dr -
1146 * check the GPU capabilities here and modify mmu->secured accordingly
1147 */
1148
1149 if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
1150 device->mmu.secured = false;
1151
Shrenuj Bansal4fd6a562017-08-07 15:12:54 -07001152 if (ADRENO_FEATURE(adreno_dev, ADRENO_IOCOHERENT))
1153 device->mmu.features |= KGSL_MMU_IO_COHERENT;
1154
Shrenuj Bansala419c792016-10-20 14:05:11 -07001155 status = adreno_ringbuffer_probe(adreno_dev, nopreempt);
1156 if (status)
1157 goto out;
1158
1159 status = adreno_dispatcher_init(adreno_dev);
1160 if (status)
1161 goto out;
1162
1163 adreno_debugfs_init(adreno_dev);
1164 adreno_profile_init(adreno_dev);
1165
1166 adreno_sysfs_init(adreno_dev);
1167
1168 kgsl_pwrscale_init(&pdev->dev, CONFIG_QCOM_ADRENO_DEFAULT_GOVERNOR);
1169
1170 /* Initialize coresight for the target */
1171 adreno_coresight_init(adreno_dev);
1172
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06001173 /* Get the system cache slice descriptor for GPU */
1174 adreno_dev->gpu_llc_slice = adreno_llc_getd(&pdev->dev, "gpu");
1175 if (IS_ERR(adreno_dev->gpu_llc_slice)) {
1176 KGSL_DRV_WARN(device,
1177 "Failed to get GPU LLC slice descriptor (%ld)\n",
1178 PTR_ERR(adreno_dev->gpu_llc_slice));
1179 adreno_dev->gpu_llc_slice = NULL;
1180 }
1181
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07001182 /* Get the system cache slice descriptor for GPU pagetables */
1183 adreno_dev->gpuhtw_llc_slice = adreno_llc_getd(&pdev->dev, "gpuhtw");
1184 if (IS_ERR(adreno_dev->gpuhtw_llc_slice)) {
1185 KGSL_DRV_WARN(device,
1186 "Failed to get gpuhtw LLC slice descriptor (%ld)\n",
1187 PTR_ERR(adreno_dev->gpuhtw_llc_slice));
1188 adreno_dev->gpuhtw_llc_slice = NULL;
1189 }
1190
Shrenuj Bansala419c792016-10-20 14:05:11 -07001191#ifdef CONFIG_INPUT
Hareesh Gundu5648ead2017-07-28 16:48:00 +05301192 if (!device->pwrctrl.input_disable) {
1193 adreno_input_handler.private = device;
1194 /*
1195 * It isn't fatal if we cannot register the input handler. Sad,
1196 * perhaps, but not fatal
1197 */
1198 if (input_register_handler(&adreno_input_handler)) {
1199 adreno_input_handler.private = NULL;
1200 KGSL_DRV_ERR(device,
1201 "Unable to register the input handler\n");
1202 }
1203 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07001204#endif
1205out:
1206 if (status) {
1207 adreno_ringbuffer_close(adreno_dev);
1208 kgsl_device_platform_remove(device);
1209 device->pdev = NULL;
1210 }
1211
1212 return status;
1213}
1214
1215static void _adreno_free_memories(struct adreno_device *adreno_dev)
1216{
1217 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001218 struct adreno_firmware *pfp_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PFP);
1219 struct adreno_firmware *pm4_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PM4);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001220
1221 if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv))
1222 kgsl_free_global(device, &adreno_dev->profile_buffer);
1223
1224 /* Free local copies of firmware and other command streams */
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001225 kfree(pfp_fw->fwvirt);
1226 pfp_fw->fwvirt = NULL;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001227
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001228 kfree(pm4_fw->fwvirt);
1229 pm4_fw->fwvirt = NULL;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001230
1231 kfree(adreno_dev->gpmu_cmds);
1232 adreno_dev->gpmu_cmds = NULL;
1233
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07001234 kgsl_free_global(device, &pfp_fw->memdesc);
1235 kgsl_free_global(device, &pm4_fw->memdesc);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001236}
1237
1238static int adreno_remove(struct platform_device *pdev)
1239{
1240 struct adreno_device *adreno_dev = adreno_get_dev(pdev);
1241 struct adreno_gpudev *gpudev;
1242 struct kgsl_device *device;
1243
1244 if (adreno_dev == NULL)
1245 return 0;
1246
1247 device = KGSL_DEVICE(adreno_dev);
1248 gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1249
1250 if (gpudev->remove != NULL)
1251 gpudev->remove(adreno_dev);
1252
1253 /* The memory is fading */
1254 _adreno_free_memories(adreno_dev);
1255
1256#ifdef CONFIG_INPUT
Hareesh Gundu5648ead2017-07-28 16:48:00 +05301257 if (adreno_input_handler.private)
1258 input_unregister_handler(&adreno_input_handler);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001259#endif
1260 adreno_sysfs_close(adreno_dev);
1261
1262 adreno_coresight_remove(adreno_dev);
1263 adreno_profile_close(adreno_dev);
1264
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06001265 /* Release the system cache slice descriptor */
1266 if (adreno_dev->gpu_llc_slice)
1267 adreno_llc_putd(adreno_dev->gpu_llc_slice);
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07001268 if (adreno_dev->gpuhtw_llc_slice)
1269 adreno_llc_putd(adreno_dev->gpuhtw_llc_slice);
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06001270
Shrenuj Bansala419c792016-10-20 14:05:11 -07001271 kgsl_pwrscale_close(device);
1272
1273 adreno_dispatcher_close(adreno_dev);
1274 adreno_ringbuffer_close(adreno_dev);
1275
1276 adreno_fault_detect_stop(adreno_dev);
1277
1278 kfree(adreno_ft_regs);
1279 adreno_ft_regs = NULL;
1280
1281 kfree(adreno_ft_regs_val);
1282 adreno_ft_regs_val = NULL;
1283
1284 if (efuse_base != NULL)
1285 iounmap(efuse_base);
1286
1287 adreno_perfcounter_close(adreno_dev);
1288 kgsl_device_platform_remove(device);
1289
Kyle Pieferb1027b02017-02-10 13:58:58 -08001290 gmu_remove(device);
1291
Shrenuj Bansala419c792016-10-20 14:05:11 -07001292 if (test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv)) {
1293 kgsl_free_global(device, &adreno_dev->pwron_fixup);
1294 clear_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv);
1295 }
1296 clear_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
1297
1298 return 0;
1299}
1300
1301static void adreno_fault_detect_init(struct adreno_device *adreno_dev)
1302{
1303 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
Shrenuj Bansalae672812016-02-24 14:17:30 -08001304 int i;
1305
1306 if (!(swfdetect ||
1307 ADRENO_FEATURE(adreno_dev, ADRENO_SOFT_FAULT_DETECT)))
1308 return;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001309
1310 /* Disable the fast hang detect bit until we know it's a go */
1311 adreno_dev->fast_hang_detect = 0;
1312
1313 adreno_ft_regs_num = (ARRAY_SIZE(adreno_ft_regs_default) +
1314 gpudev->ft_perf_counters_count*2);
1315
1316 adreno_ft_regs = kcalloc(adreno_ft_regs_num, sizeof(unsigned int),
1317 GFP_KERNEL);
1318 adreno_ft_regs_val = kcalloc(adreno_ft_regs_num, sizeof(unsigned int),
1319 GFP_KERNEL);
1320
1321 if (adreno_ft_regs == NULL || adreno_ft_regs_val == NULL) {
1322 kfree(adreno_ft_regs);
1323 kfree(adreno_ft_regs_val);
1324
1325 adreno_ft_regs = NULL;
1326 adreno_ft_regs_val = NULL;
1327
1328 return;
1329 }
1330
1331 for (i = 0; i < ARRAY_SIZE(adreno_ft_regs_default); i++)
1332 adreno_ft_regs[i] = adreno_getreg(adreno_dev,
1333 adreno_ft_regs_default[i]);
1334
1335 set_bit(ADRENO_DEVICE_SOFT_FAULT_DETECT, &adreno_dev->priv);
1336
Shrenuj Bansalae672812016-02-24 14:17:30 -08001337 adreno_fault_detect_start(adreno_dev);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001338}
1339
1340static int adreno_init(struct kgsl_device *device)
1341{
1342 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1343 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1344 int ret;
1345
Hareesh Gundu6aae7e22017-08-22 18:55:50 +05301346 if (!adreno_is_a3xx(adreno_dev))
1347 kgsl_sharedmem_set(device, &device->scratch, 0, 0,
1348 device->scratch.size);
1349
Shrenuj Bansala419c792016-10-20 14:05:11 -07001350 ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
1351 if (ret)
1352 return ret;
1353
1354 /*
1355 * initialization only needs to be done once, until the
 1356 * device is shut down
1357 */
1358 if (test_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv))
1359 return 0;
1360
1361 /*
1362 * Either the microcode read failed because the usermodehelper isn't
1363 * available or the microcode was corrupted. Fail the init and force
1364 * the user to try the open() again
1365 */
1366
1367 ret = gpudev->microcode_read(adreno_dev);
1368 if (ret)
1369 return ret;
1370
1371 /* Put the GPU in a responsive state */
George Shen3726c812017-05-12 11:06:03 -07001372 if (ADRENO_GPUREV(adreno_dev) < 600) {
1373 /* No need for newer generation architectures */
1374 ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
1375 if (ret)
1376 return ret;
1377 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07001378
1379 ret = adreno_iommu_init(adreno_dev);
1380 if (ret)
1381 return ret;
1382
1383 adreno_perfcounter_init(adreno_dev);
1384 adreno_fault_detect_init(adreno_dev);
1385
1386 /* Power down the device */
George Shen3726c812017-05-12 11:06:03 -07001387 if (ADRENO_GPUREV(adreno_dev) < 600)
1388 kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001389
1390 if (gpudev->init != NULL)
1391 gpudev->init(adreno_dev);
1392
1393 set_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
1394
1395 /* Use shader offset and length defined in gpudev */
1396 if (adreno_dev->gpucore->shader_offset &&
1397 adreno_dev->gpucore->shader_size) {
1398
1399 if (device->shader_mem_phys || device->shader_mem_virt)
1400 KGSL_DRV_ERR(device,
1401 "Shader memory already specified in device tree\n");
1402 else {
1403 device->shader_mem_phys = device->reg_phys +
1404 adreno_dev->gpucore->shader_offset;
1405 device->shader_mem_virt = device->reg_virt +
1406 adreno_dev->gpucore->shader_offset;
1407 device->shader_mem_len =
1408 adreno_dev->gpucore->shader_size;
1409 }
1410 }
1411
1412 /*
1413 * Allocate a small chunk of memory for precise drawobj profiling for
1414 * those targets that have the always on timer
1415 */
1416
1417 if (!adreno_is_a3xx(adreno_dev)) {
1418 int r = kgsl_allocate_global(device,
1419 &adreno_dev->profile_buffer, PAGE_SIZE,
1420 0, 0, "alwayson");
1421
1422 adreno_dev->profile_index = 0;
1423
1424 if (r == 0) {
1425 set_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE,
1426 &adreno_dev->priv);
1427 kgsl_sharedmem_set(device,
1428 &adreno_dev->profile_buffer, 0, 0,
1429 PAGE_SIZE);
1430 }
1431
1432 }
1433
Harshdeep Dhatt38e57d72017-08-30 13:24:07 -06001434 if (nopreempt == false) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07001435 int r = 0;
1436
1437 if (gpudev->preemption_init)
1438 r = gpudev->preemption_init(adreno_dev);
1439
1440 if (r == 0)
1441 set_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
1442 else
1443 WARN(1, "adreno: GPU preemption is disabled\n");
1444 }
1445
1446 return 0;
1447}
1448
1449static bool regulators_left_on(struct kgsl_device *device)
1450{
1451 int i;
1452
George Shen3726c812017-05-12 11:06:03 -07001453 if (kgsl_gmu_isenabled(device))
1454 return false;
1455
Shrenuj Bansala419c792016-10-20 14:05:11 -07001456 for (i = 0; i < KGSL_MAX_REGULATORS; i++) {
1457 struct kgsl_regulator *regulator =
1458 &device->pwrctrl.regulators[i];
1459
1460 if (IS_ERR_OR_NULL(regulator->reg))
1461 break;
1462
1463 if (regulator_is_enabled(regulator->reg))
1464 return true;
1465 }
1466
1467 return false;
1468}
1469
1470static void _set_secvid(struct kgsl_device *device)
1471{
1472 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
Carter Cooper6682ead2017-09-28 14:52:53 -06001473 static bool set;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001474
1475 /* Program GPU content protection init values */
Carter Cooper6682ead2017-09-28 14:52:53 -06001476 if (device->mmu.secured && !set) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07001477 if (adreno_is_a4xx(adreno_dev))
1478 adreno_writereg(adreno_dev,
1479 ADRENO_REG_RBBM_SECVID_TRUST_CONFIG, 0x2);
1480 adreno_writereg(adreno_dev,
1481 ADRENO_REG_RBBM_SECVID_TSB_CONTROL, 0x0);
1482
1483 adreno_writereg64(adreno_dev,
1484 ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
1485 ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
1486 KGSL_IOMMU_SECURE_BASE);
1487 adreno_writereg(adreno_dev,
1488 ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
1489 KGSL_IOMMU_SECURE_SIZE);
Carter Cooper6682ead2017-09-28 14:52:53 -06001490 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_SECVID_SET_ONCE))
1491 set = true;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001492 }
1493}
1494
Carter Cooper1d8f5472017-03-15 15:01:09 -06001495static int adreno_switch_to_unsecure_mode(struct adreno_device *adreno_dev,
1496 struct adreno_ringbuffer *rb)
1497{
1498 unsigned int *cmds;
1499 int ret;
1500
1501 cmds = adreno_ringbuffer_allocspace(rb, 2);
1502 if (IS_ERR(cmds))
1503 return PTR_ERR(cmds);
1504 if (cmds == NULL)
1505 return -ENOSPC;
1506
1507 cmds += cp_secure_mode(adreno_dev, cmds, 0);
1508
1509 ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
1510 if (ret)
1511 adreno_spin_idle_debug(adreno_dev,
1512 "Switch to unsecure failed to idle\n");
1513
1514 return ret;
1515}
1516
1517int adreno_set_unsecured_mode(struct adreno_device *adreno_dev,
1518 struct adreno_ringbuffer *rb)
1519{
1520 int ret = 0;
1521
Carter Cooper4a313ae2017-02-23 11:11:56 -07001522 if (!adreno_is_a5xx(adreno_dev) && !adreno_is_a6xx(adreno_dev))
Carter Cooper1d8f5472017-03-15 15:01:09 -06001523 return -EINVAL;
1524
1525 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS) &&
1526 adreno_is_a5xx(adreno_dev)) {
1527 ret = a5xx_critical_packet_submit(adreno_dev, rb);
1528 if (ret)
1529 return ret;
1530 }
1531
1532 /* GPU comes up in secured mode, make it unsecured by default */
Harshdeep Dhatta9e0d762017-05-10 14:16:42 -06001533 if (adreno_dev->zap_loaded)
Carter Cooper1d8f5472017-03-15 15:01:09 -06001534 ret = adreno_switch_to_unsecure_mode(adreno_dev, rb);
1535 else
1536 adreno_writereg(adreno_dev,
1537 ADRENO_REG_RBBM_SECVID_TRUST_CONTROL, 0x0);
1538
1539 return ret;
1540}
1541
Lynus Vaze1fabf12017-10-09 21:33:26 +05301542static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
1543{
1544 int i;
1545 struct adreno_ringbuffer *rb;
1546
1547 FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
1548 if (rb->drawctxt_active)
1549 kgsl_context_put(&(rb->drawctxt_active->base));
1550 rb->drawctxt_active = NULL;
1551
1552 kgsl_sharedmem_writel(KGSL_DEVICE(adreno_dev),
1553 &rb->pagetable_desc, PT_INFO_OFFSET(current_rb_ptname),
1554 0);
1555 }
1556}
1557
Shrenuj Bansala419c792016-10-20 14:05:11 -07001558/**
1559 * _adreno_start - Power up the GPU and prepare to accept commands
1560 * @adreno_dev: Pointer to an adreno_device structure
1561 *
1562 * The core function that powers up and initializes the GPU. This function is
1563 * called at init and after coming out of SLUMBER
1564 */
1565static int _adreno_start(struct adreno_device *adreno_dev)
1566{
1567 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1568 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1569 int status = -EINVAL, ret;
1570 unsigned int state = device->state;
1571 bool regulator_left_on;
1572 unsigned int pmqos_wakeup_vote = device->pwrctrl.pm_qos_wakeup_latency;
1573 unsigned int pmqos_active_vote = device->pwrctrl.pm_qos_active_latency;
1574
1575 /* make sure ADRENO_DEVICE_STARTED is not set here */
1576 BUG_ON(test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv));
1577
Gaurav Sonwanic169c322017-06-15 14:11:23 +05301578 /* disallow l2pc during wake up to improve GPU wake up time */
1579 kgsl_pwrctrl_update_l2pc(&adreno_dev->dev,
1580 KGSL_L2PC_WAKEUP_TIMEOUT);
1581
Shrenuj Bansala419c792016-10-20 14:05:11 -07001582 pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
1583 pmqos_wakeup_vote);
1584
1585 regulator_left_on = regulators_left_on(device);
1586
1587 /* Clear any GPU faults that might have been left over */
1588 adreno_clear_gpu_fault(adreno_dev);
1589
1590 /* Put the GPU in a responsive state */
1591 status = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
1592 if (status)
1593 goto error_pwr_off;
1594
Lynus Vaze1fabf12017-10-09 21:33:26 +05301595 /* Set any stale active contexts to NULL */
1596 adreno_set_active_ctxs_null(adreno_dev);
1597
Shrenuj Bansala419c792016-10-20 14:05:11 -07001598 /* Set the bit to indicate that we've just powered on */
1599 set_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv);
1600
1601 /* Soft reset the GPU if a regulator is stuck on */
1602 if (regulator_left_on)
1603 _soft_reset(adreno_dev);
1604
1605 adreno_ringbuffer_set_global(adreno_dev, 0);
1606
1607 status = kgsl_mmu_start(device);
1608 if (status)
1609 goto error_pwr_off;
1610
1611 _set_secvid(device);
1612
1613 status = adreno_ocmem_malloc(adreno_dev);
1614 if (status) {
1615 KGSL_DRV_ERR(device, "OCMEM malloc failed\n");
1616 goto error_mmu_off;
1617 }
1618
Carter Coopera2a12982017-05-02 08:43:15 -06001619 /* Send OOB request to turn on the GX */
1620 if (gpudev->oob_set) {
Kyle Piefer8fe58df2017-09-12 09:19:28 -07001621 status = gpudev->oob_set(adreno_dev, OOB_GPU_SET_MASK,
1622 OOB_GPU_CHECK_MASK,
1623 OOB_GPU_CLEAR_MASK);
Carter Coopera2a12982017-05-02 08:43:15 -06001624 if (status)
1625 goto error_mmu_off;
1626 }
1627
Shrenuj Bansala419c792016-10-20 14:05:11 -07001628 /* Enable 64 bit gpu addr if feature is set */
1629 if (gpudev->enable_64bit &&
1630 adreno_support_64bit(adreno_dev))
1631 gpudev->enable_64bit(adreno_dev);
1632
1633 if (adreno_dev->perfctr_pwr_lo == 0) {
1634 ret = adreno_perfcounter_get(adreno_dev,
1635 KGSL_PERFCOUNTER_GROUP_PWR, 1,
1636 &adreno_dev->perfctr_pwr_lo, NULL,
1637 PERFCOUNTER_FLAG_KERNEL);
1638
1639 if (ret) {
Kyle Piefer74645b532017-05-16 11:45:40 -07001640 WARN_ONCE(1, "Unable to get perf counters for DCVS\n");
Shrenuj Bansala419c792016-10-20 14:05:11 -07001641 adreno_dev->perfctr_pwr_lo = 0;
1642 }
1643 }
1644
1645
1646 if (device->pwrctrl.bus_control) {
1647 /* VBIF waiting for RAM */
1648 if (adreno_dev->starved_ram_lo == 0) {
1649 ret = adreno_perfcounter_get(adreno_dev,
1650 KGSL_PERFCOUNTER_GROUP_VBIF_PWR, 0,
1651 &adreno_dev->starved_ram_lo, NULL,
1652 PERFCOUNTER_FLAG_KERNEL);
1653
1654 if (ret) {
1655 KGSL_DRV_ERR(device,
1656 "Unable to get perf counters for bus DCVS\n");
1657 adreno_dev->starved_ram_lo = 0;
1658 }
1659 }
1660
Deepak Kumarc52781f2017-11-06 16:10:17 +05301661 if (adreno_has_gbif(adreno_dev)) {
1662 if (adreno_dev->starved_ram_lo_ch1 == 0) {
1663 ret = adreno_perfcounter_get(adreno_dev,
1664 KGSL_PERFCOUNTER_GROUP_VBIF_PWR, 1,
1665 &adreno_dev->starved_ram_lo_ch1, NULL,
1666 PERFCOUNTER_FLAG_KERNEL);
1667
1668 if (ret) {
1669 KGSL_DRV_ERR(device,
1670 "Unable to get perf counters for bus DCVS\n");
1671 adreno_dev->starved_ram_lo_ch1 = 0;
1672 }
1673 }
Deepak Kumarc52781f2017-11-06 16:10:17 +05301674
Deepak Kumar84b9e032017-11-08 13:08:50 +05301675 if (adreno_dev->ram_cycles_lo == 0) {
1676 ret = adreno_perfcounter_get(adreno_dev,
1677 KGSL_PERFCOUNTER_GROUP_VBIF,
1678 GBIF_AXI0_READ_DATA_TOTAL_BEATS,
1679 &adreno_dev->ram_cycles_lo, NULL,
1680 PERFCOUNTER_FLAG_KERNEL);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001681
Deepak Kumar84b9e032017-11-08 13:08:50 +05301682 if (ret) {
1683 KGSL_DRV_ERR(device,
1684 "Unable to get perf counters for bus DCVS\n");
1685 adreno_dev->ram_cycles_lo = 0;
1686 }
1687 }
1688
1689 if (adreno_dev->ram_cycles_lo_ch1_read == 0) {
1690 ret = adreno_perfcounter_get(adreno_dev,
1691 KGSL_PERFCOUNTER_GROUP_VBIF,
1692 GBIF_AXI1_READ_DATA_TOTAL_BEATS,
1693 &adreno_dev->ram_cycles_lo_ch1_read,
1694 NULL,
1695 PERFCOUNTER_FLAG_KERNEL);
1696
1697 if (ret) {
1698 KGSL_DRV_ERR(device,
1699 "Unable to get perf counters for bus DCVS\n");
1700 adreno_dev->ram_cycles_lo_ch1_read = 0;
1701 }
1702 }
1703
1704 if (adreno_dev->ram_cycles_lo_ch0_write == 0) {
1705 ret = adreno_perfcounter_get(adreno_dev,
1706 KGSL_PERFCOUNTER_GROUP_VBIF,
1707 GBIF_AXI0_WRITE_DATA_TOTAL_BEATS,
1708 &adreno_dev->ram_cycles_lo_ch0_write,
1709 NULL,
1710 PERFCOUNTER_FLAG_KERNEL);
1711
1712 if (ret) {
1713 KGSL_DRV_ERR(device,
1714 "Unable to get perf counters for bus DCVS\n");
1715 adreno_dev->ram_cycles_lo_ch0_write = 0;
1716 }
1717 }
1718
1719 if (adreno_dev->ram_cycles_lo_ch1_write == 0) {
1720 ret = adreno_perfcounter_get(adreno_dev,
1721 KGSL_PERFCOUNTER_GROUP_VBIF,
1722 GBIF_AXI1_WRITE_DATA_TOTAL_BEATS,
1723 &adreno_dev->ram_cycles_lo_ch1_write,
1724 NULL,
1725 PERFCOUNTER_FLAG_KERNEL);
1726
1727 if (ret) {
1728 KGSL_DRV_ERR(device,
1729 "Unable to get perf counters for bus DCVS\n");
1730 adreno_dev->ram_cycles_lo_ch1_write = 0;
1731 }
1732 }
1733 } else {
1734 /* VBIF DDR cycles */
1735 if (adreno_dev->ram_cycles_lo == 0) {
1736 ret = adreno_perfcounter_get(adreno_dev,
1737 KGSL_PERFCOUNTER_GROUP_VBIF,
1738 VBIF_AXI_TOTAL_BEATS,
1739 &adreno_dev->ram_cycles_lo, NULL,
1740 PERFCOUNTER_FLAG_KERNEL);
1741
1742 if (ret) {
1743 KGSL_DRV_ERR(device,
1744 "Unable to get perf counters for bus DCVS\n");
1745 adreno_dev->ram_cycles_lo = 0;
1746 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07001747 }
1748 }
1749 }
1750
1751 /* Clear the busy_data stats - we're starting over from scratch */
1752 adreno_dev->busy_data.gpu_busy = 0;
Deepak Kumar84b9e032017-11-08 13:08:50 +05301753 adreno_dev->busy_data.bif_ram_cycles = 0;
1754 adreno_dev->busy_data.bif_ram_cycles_read_ch1 = 0;
1755 adreno_dev->busy_data.bif_ram_cycles_write_ch0 = 0;
1756 adreno_dev->busy_data.bif_ram_cycles_write_ch1 = 0;
1757 adreno_dev->busy_data.bif_starved_ram = 0;
1758 adreno_dev->busy_data.bif_starved_ram_ch1 = 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001759
1760 /* Restore performance counter registers with saved values */
1761 adreno_perfcounter_restore(adreno_dev);
1762
1763 /* Start the GPU */
1764 gpudev->start(adreno_dev);
1765
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06001766 /*
 1767	 * The system cache control registers
 1768	 * live on the CX rail and therefore need
 1769	 * to be reprogrammed every time the GPU
 1770	 * comes out of power collapse.
1771 */
1772 adreno_llc_setup(device);
1773
Shrenuj Bansala419c792016-10-20 14:05:11 -07001774 /* Re-initialize the coresight registers if applicable */
1775 adreno_coresight_start(adreno_dev);
1776
1777 adreno_irqctrl(adreno_dev, 1);
1778
1779 adreno_perfcounter_start(adreno_dev);
1780
1781 /* Clear FSR here in case it is set from a previous pagefault */
1782 kgsl_mmu_clear_fsr(&device->mmu);
1783
1784 status = adreno_ringbuffer_start(adreno_dev, ADRENO_START_COLD);
1785 if (status)
Carter Coopera2a12982017-05-02 08:43:15 -06001786 goto error_oob_clear;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001787
1788 /* Start the dispatcher */
1789 adreno_dispatcher_start(device);
1790
1791 device->reset_counter++;
1792
1793 set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
1794
1795 if (pmqos_active_vote != pmqos_wakeup_vote)
1796 pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
1797 pmqos_active_vote);
1798
Carter Coopera2a12982017-05-02 08:43:15 -06001799 /* Send OOB request to allow IFPC */
Kyle Piefer83656c82017-09-11 14:23:37 -07001800 if (gpudev->oob_clear) {
Kyle Piefer8fe58df2017-09-12 09:19:28 -07001801 gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
Carter Coopera2a12982017-05-02 08:43:15 -06001802
Kyle Piefer83656c82017-09-11 14:23:37 -07001803 /* If we made it this far, the BOOT OOB was sent to the GMU */
1804 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
1805 gpudev->oob_clear(adreno_dev,
1806 OOB_BOOT_SLUMBER_CLEAR_MASK);
1807 }
1808
Shrenuj Bansala419c792016-10-20 14:05:11 -07001809 return 0;
1810
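/*
 * Error unwind for _adreno_start(): clear any GPU OOB vote that was set,
 * stop the MMU, then restore the previous power state and pm_qos vote
 * before returning the failure to the caller.
 */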
Carter Coopera2a12982017-05-02 08:43:15 -06001811error_oob_clear:
Kyle Piefer8fe58df2017-09-12 09:19:28 -07001812 if (gpudev->oob_clear)
1813 gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
Carter Coopera2a12982017-05-02 08:43:15 -06001814
Shrenuj Bansala419c792016-10-20 14:05:11 -07001815error_mmu_off:
1816 kgsl_mmu_stop(&device->mmu);
Kyle Piefer8fe58df2017-09-12 09:19:28 -07001817 if (gpudev->oob_clear &&
1818 ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
1819 gpudev->oob_clear(adreno_dev,
1820 OOB_BOOT_SLUMBER_CLEAR_MASK);
1821 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07001822
1823error_pwr_off:
1824 /* set the state back to original state */
1825 kgsl_pwrctrl_change_state(device, state);
1826
1827 if (pmqos_active_vote != pmqos_wakeup_vote)
1828 pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
1829 pmqos_active_vote);
1830
1831 return status;
1832}
1833
1834/**
1835 * adreno_start() - Power up and initialize the GPU
1836 * @device: Pointer to the KGSL device to power up
 1837 * @priority: Boolean flag to specify if the start should be scheduled in a low
1838 * latency work queue
1839 *
1840 * Power up the GPU and initialize it. If priority is specified then elevate
1841 * the thread priority for the duration of the start operation
1842 */
Shrenuj Bansald0fe7462017-05-08 16:11:19 -07001843int adreno_start(struct kgsl_device *device, int priority)
Shrenuj Bansala419c792016-10-20 14:05:11 -07001844{
1845 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1846 int nice = task_nice(current);
1847 int ret;
1848
1849 if (priority && (adreno_wake_nice < nice))
1850 set_user_nice(current, adreno_wake_nice);
1851
1852 ret = _adreno_start(adreno_dev);
1853
1854 if (priority)
1855 set_user_nice(current, nice);
1856
1857 return ret;
1858}
1859
Shrenuj Bansala419c792016-10-20 14:05:11 -07001860static int adreno_stop(struct kgsl_device *device)
1861{
1862 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
Kyle Piefer4033f562017-08-16 10:00:48 -07001863 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
Kyle Piefer8fe58df2017-09-12 09:19:28 -07001864 int error = 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001865
1866 if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
1867 return 0;
1868
Kyle Piefer8fe58df2017-09-12 09:19:28 -07001869 /* Turn the power on one last time before stopping */
1870 if (gpudev->oob_set) {
1871 error = gpudev->oob_set(adreno_dev, OOB_GPU_SET_MASK,
1872 OOB_GPU_CHECK_MASK,
1873 OOB_GPU_CLEAR_MASK);
1874 if (error) {
1875 gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
1876 return error;
1877 }
1878 }
1879
Shrenuj Bansala419c792016-10-20 14:05:11 -07001880 adreno_dispatcher_stop(adreno_dev);
1881
1882 adreno_ringbuffer_stop(adreno_dev);
1883
1884 kgsl_pwrscale_update_stats(device);
1885
1886 adreno_irqctrl(adreno_dev, 0);
1887
1888 adreno_ocmem_free(adreno_dev);
1889
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06001890 if (adreno_dev->gpu_llc_slice)
1891 adreno_llc_deactivate_slice(adreno_dev->gpu_llc_slice);
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07001892 if (adreno_dev->gpuhtw_llc_slice)
1893 adreno_llc_deactivate_slice(adreno_dev->gpuhtw_llc_slice);
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06001894
Shrenuj Bansala419c792016-10-20 14:05:11 -07001895 /* Save active coresight registers if applicable */
1896 adreno_coresight_stop(adreno_dev);
1897
1898 /* Save physical performance counter values before GPU power down*/
1899 adreno_perfcounter_save(adreno_dev);
1900
Kyle Piefer8fe58df2017-09-12 09:19:28 -07001901 if (gpudev->oob_clear)
1902 gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
1903
Kyle Piefer4033f562017-08-16 10:00:48 -07001904 /*
1905 * Saving perfcounters will use an OOB to put the GMU into
1906 * active state. Before continuing, we should wait for the
1907 * GMU to return to the lowest idle level. This is
1908 * because some idle level transitions require VBIF and MMU.
1909 */
1910 if (gpudev->wait_for_lowest_idle &&
George Shenf6c15bd2017-11-01 12:22:12 -07001911 gpudev->wait_for_lowest_idle(adreno_dev)) {
1912 struct gmu_device *gmu = &device->gmu;
1913
1914 set_bit(GMU_FAULT, &gmu->flags);
1915 gmu_snapshot(device);
1916 /*
 1917			 * Assume the GMU has hung if it has not responded within 10ms.
 1918			 * It should be relatively safe to clear the VBIF and stop the
 1919			 * MMU afterwards. Returning early from adreno_stop here would
 1920			 * result in a kernel panic in adreno_start.
1921 */
1922 error = -EINVAL;
1923 }
Kyle Piefer4033f562017-08-16 10:00:48 -07001924
Shrenuj Bansala419c792016-10-20 14:05:11 -07001925 adreno_vbif_clear_pending_transactions(device);
1926
1927 kgsl_mmu_stop(&device->mmu);
1928
Harshdeep Dhatt6342e6b2017-09-21 21:25:21 -06001929 /*
1930 * At this point, MMU is turned off so we can safely
1931 * destroy any pending contexts and their pagetables
1932 */
1933 adreno_set_active_ctxs_null(adreno_dev);
1934
Shrenuj Bansala419c792016-10-20 14:05:11 -07001935 clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
1936
George Shenf6c15bd2017-11-01 12:22:12 -07001937 return error;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001938}
1939
1940static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault)
1941{
1942 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1943
1944 /*
 1945	 * Do not do a soft reset for an IOMMU fault (because the IOMMU hardware
1946 * needs a reset too) or for the A304 because it can't do SMMU
1947 * programming of any kind after a soft reset
1948 */
1949
1950 if ((fault & ADRENO_IOMMU_PAGE_FAULT) || adreno_is_a304(adreno_dev))
1951 return false;
1952
1953 return true;
1954}
1955
1956/**
1957 * adreno_reset() - Helper function to reset the GPU
1958 * @device: Pointer to the KGSL device structure for the GPU
1959 * @fault: Type of fault. Needed to skip soft reset for MMU fault
1960 *
1961 * Try to reset the GPU to recover from a fault. First, try to do a low latency
1962 * soft reset. If the soft reset fails for some reason, then bring out the big
1963 * guns and toggle the footswitch.
1964 */
1965int adreno_reset(struct kgsl_device *device, int fault)
1966{
1967 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1968 int ret = -EINVAL;
1969 int i = 0;
1970
1971 /* Try soft reset first */
1972 if (adreno_try_soft_reset(device, fault)) {
1973 /* Make sure VBIF is cleared before resetting */
1974 ret = adreno_vbif_clear_pending_transactions(device);
1975
1976 if (ret == 0) {
1977 ret = adreno_soft_reset(device);
1978 if (ret)
1979 KGSL_DEV_ERR_ONCE(device,
1980 "Device soft reset failed\n");
1981 }
1982 }
1983 if (ret) {
1984 /* If soft reset failed/skipped, then pull the power */
1985 kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
1986 /* since device is officially off now clear start bit */
1987 clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
1988
1989 /* Keep trying to start the device until it works */
1990 for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) {
1991 ret = adreno_start(device, 0);
1992 if (!ret)
1993 break;
1994
1995 msleep(20);
1996 }
1997 }
1998 if (ret)
1999 return ret;
2000
2001 if (i != 0)
2002 KGSL_DRV_WARN(device, "Device hard reset tried %d tries\n", i);
2003
2004 /*
2005 * If active_cnt is non-zero then the system was active before
2006 * going into a reset - put it back in that state
2007 */
2008
2009 if (atomic_read(&device->active_cnt))
2010 kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
2011 else
2012 kgsl_pwrctrl_change_state(device, KGSL_STATE_NAP);
2013
2014 return ret;
2015}
2016
2017static int adreno_getproperty(struct kgsl_device *device,
2018 unsigned int type,
2019 void __user *value,
2020 size_t sizebytes)
2021{
2022 int status = -EINVAL;
2023 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2024
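	/*
	 * Each property below validates the user-supplied size, fills in a
	 * zeroed local structure and copies it out to user space; a size
	 * mismatch returns -EINVAL and a failed copy returns -EFAULT.
	 */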
2025 switch (type) {
2026 case KGSL_PROP_DEVICE_INFO:
2027 {
2028 struct kgsl_devinfo devinfo;
2029
2030 if (sizebytes != sizeof(devinfo)) {
2031 status = -EINVAL;
2032 break;
2033 }
2034
2035 memset(&devinfo, 0, sizeof(devinfo));
2036 devinfo.device_id = device->id+1;
2037 devinfo.chip_id = adreno_dev->chipid;
2038 devinfo.mmu_enabled =
2039 MMU_FEATURE(&device->mmu, KGSL_MMU_PAGED);
2040 devinfo.gmem_gpubaseaddr = adreno_dev->gmem_base;
2041 devinfo.gmem_sizebytes = adreno_dev->gmem_size;
2042
2043 if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
2044 0) {
2045 status = -EFAULT;
2046 break;
2047 }
2048 status = 0;
2049 }
2050 break;
2051 case KGSL_PROP_DEVICE_SHADOW:
2052 {
2053 struct kgsl_shadowprop shadowprop;
2054
2055 if (sizebytes != sizeof(shadowprop)) {
2056 status = -EINVAL;
2057 break;
2058 }
2059 memset(&shadowprop, 0, sizeof(shadowprop));
2060 if (device->memstore.hostptr) {
 2061			/* NOTE: with mmu enabled, gpuaddr doesn't mean
2062 * anything to mmap().
2063 */
2064 shadowprop.gpuaddr =
2065 (unsigned int) device->memstore.gpuaddr;
2066 shadowprop.size = device->memstore.size;
2067 /* GSL needs this to be set, even if it
2068 * appears to be meaningless
2069 */
2070 shadowprop.flags = KGSL_FLAGS_INITIALIZED |
2071 KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS;
2072 }
2073 if (copy_to_user(value, &shadowprop,
2074 sizeof(shadowprop))) {
2075 status = -EFAULT;
2076 break;
2077 }
2078 status = 0;
2079 }
2080 break;
2081 case KGSL_PROP_DEVICE_QDSS_STM:
2082 {
2083 struct kgsl_qdss_stm_prop qdssprop = {0};
2084 struct kgsl_memdesc *qdss_desc =
2085 kgsl_mmu_get_qdss_global_entry(device);
2086
2087 if (sizebytes != sizeof(qdssprop)) {
2088 status = -EINVAL;
2089 break;
2090 }
2091
2092 if (qdss_desc) {
2093 qdssprop.gpuaddr = qdss_desc->gpuaddr;
2094 qdssprop.size = qdss_desc->size;
2095 }
2096
2097 if (copy_to_user(value, &qdssprop,
2098 sizeof(qdssprop))) {
2099 status = -EFAULT;
2100 break;
2101 }
2102 status = 0;
2103 }
2104 break;
Jonathan Wicks4892d8d2017-02-24 16:21:26 -07002105 case KGSL_PROP_DEVICE_QTIMER:
2106 {
2107 struct kgsl_qtimer_prop qtimerprop = {0};
2108 struct kgsl_memdesc *qtimer_desc =
2109 kgsl_mmu_get_qtimer_global_entry(device);
2110
2111 if (sizebytes != sizeof(qtimerprop)) {
2112 status = -EINVAL;
2113 break;
2114 }
2115
2116 if (qtimer_desc) {
2117 qtimerprop.gpuaddr = qtimer_desc->gpuaddr;
2118 qtimerprop.size = qtimer_desc->size;
2119 }
2120
2121 if (copy_to_user(value, &qtimerprop,
2122 sizeof(qtimerprop))) {
2123 status = -EFAULT;
2124 break;
2125 }
2126 status = 0;
2127 }
2128 break;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002129 case KGSL_PROP_MMU_ENABLE:
2130 {
2131 /* Report MMU only if we can handle paged memory */
2132 int mmu_prop = MMU_FEATURE(&device->mmu,
2133 KGSL_MMU_PAGED);
2134
2135 if (sizebytes < sizeof(mmu_prop)) {
2136 status = -EINVAL;
2137 break;
2138 }
2139 if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) {
2140 status = -EFAULT;
2141 break;
2142 }
2143 status = 0;
2144 }
2145 break;
2146 case KGSL_PROP_INTERRUPT_WAITS:
2147 {
2148 int int_waits = 1;
2149
2150 if (sizebytes != sizeof(int)) {
2151 status = -EINVAL;
2152 break;
2153 }
2154 if (copy_to_user(value, &int_waits, sizeof(int))) {
2155 status = -EFAULT;
2156 break;
2157 }
2158 status = 0;
2159 }
2160 break;
2161 case KGSL_PROP_UCHE_GMEM_VADDR:
2162 {
2163 uint64_t gmem_vaddr = 0;
2164
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002165 if (adreno_is_a5xx(adreno_dev) ||
2166 adreno_is_a6xx(adreno_dev))
Shrenuj Bansala419c792016-10-20 14:05:11 -07002167 gmem_vaddr = ADRENO_UCHE_GMEM_BASE;
2168 if (sizebytes != sizeof(uint64_t)) {
2169 status = -EINVAL;
2170 break;
2171 }
2172 if (copy_to_user(value, &gmem_vaddr,
2173 sizeof(uint64_t))) {
2174 status = -EFAULT;
2175 break;
2176 }
2177 status = 0;
2178 }
2179 break;
2180 case KGSL_PROP_SP_GENERIC_MEM:
2181 {
2182 struct kgsl_sp_generic_mem sp_mem;
2183
2184 if (sizebytes != sizeof(sp_mem)) {
2185 status = -EINVAL;
2186 break;
2187 }
2188 memset(&sp_mem, 0, sizeof(sp_mem));
2189
2190 sp_mem.local = adreno_dev->sp_local_gpuaddr;
2191 sp_mem.pvt = adreno_dev->sp_pvt_gpuaddr;
2192
2193 if (copy_to_user(value, &sp_mem, sizeof(sp_mem))) {
2194 status = -EFAULT;
2195 break;
2196 }
2197 status = 0;
2198 }
2199 break;
2200 case KGSL_PROP_UCODE_VERSION:
2201 {
2202 struct kgsl_ucode_version ucode;
2203
2204 if (sizebytes != sizeof(ucode)) {
2205 status = -EINVAL;
2206 break;
2207 }
2208 memset(&ucode, 0, sizeof(ucode));
2209
Shrenuj Bansalacf1ef42016-06-01 11:11:27 -07002210 ucode.pfp = adreno_dev->fw[ADRENO_FW_PFP].version;
2211 ucode.pm4 = adreno_dev->fw[ADRENO_FW_PM4].version;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002212
2213 if (copy_to_user(value, &ucode, sizeof(ucode))) {
2214 status = -EFAULT;
2215 break;
2216 }
2217 status = 0;
2218 }
2219 break;
2220 case KGSL_PROP_GPMU_VERSION:
2221 {
2222 struct kgsl_gpmu_version gpmu;
2223
2224 if (adreno_dev->gpucore == NULL) {
2225 status = -EINVAL;
2226 break;
2227 }
2228
2229 if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU)) {
2230 status = -EOPNOTSUPP;
2231 break;
2232 }
2233
2234 if (sizebytes != sizeof(gpmu)) {
2235 status = -EINVAL;
2236 break;
2237 }
2238 memset(&gpmu, 0, sizeof(gpmu));
2239
2240 gpmu.major = adreno_dev->gpucore->gpmu_major;
2241 gpmu.minor = adreno_dev->gpucore->gpmu_minor;
2242 gpmu.features = adreno_dev->gpucore->gpmu_features;
2243
2244 if (copy_to_user(value, &gpmu, sizeof(gpmu))) {
2245 status = -EFAULT;
2246 break;
2247 }
2248 status = 0;
2249 }
2250 break;
2251 case KGSL_PROP_HIGHEST_BANK_BIT:
2252 {
2253 unsigned int bit;
2254
2255 if (sizebytes < sizeof(unsigned int)) {
2256 status = -EINVAL;
2257 break;
2258 }
2259
2260 if (of_property_read_u32(device->pdev->dev.of_node,
2261 "qcom,highest-bank-bit", &bit)) {
2262 status = -EINVAL;
2263 break;
2264 }
2265
2266 if (copy_to_user(value, &bit, sizeof(bit))) {
2267 status = -EFAULT;
2268 break;
2269 }
2270 }
2271 status = 0;
2272 break;
Shrenuj Bansala9ae9de2016-11-15 16:01:00 -08002273 case KGSL_PROP_MIN_ACCESS_LENGTH:
2274 {
2275 unsigned int mal;
2276
2277 if (sizebytes < sizeof(unsigned int)) {
2278 status = -EINVAL;
2279 break;
2280 }
2281
2282 if (of_property_read_u32(device->pdev->dev.of_node,
2283 "qcom,min-access-length", &mal)) {
2284 mal = 0;
2285 }
2286
2287 if (copy_to_user(value, &mal, sizeof(mal))) {
2288 status = -EFAULT;
2289 break;
2290 }
2291 }
2292 status = 0;
2293 break;
2294 case KGSL_PROP_UBWC_MODE:
2295 {
2296 unsigned int mode;
2297
2298 if (sizebytes < sizeof(unsigned int)) {
2299 status = -EINVAL;
2300 break;
2301 }
2302
2303 if (of_property_read_u32(device->pdev->dev.of_node,
2304 "qcom,ubwc-mode", &mode))
2305 mode = 0;
2306
2307 if (copy_to_user(value, &mode, sizeof(mode))) {
2308 status = -EFAULT;
2309 break;
2310 }
2311 }
2312 status = 0;
2313 break;
2314
Shrenuj Bansala419c792016-10-20 14:05:11 -07002315 case KGSL_PROP_DEVICE_BITNESS:
2316 {
2317 unsigned int bitness = 32;
2318
2319 if (sizebytes != sizeof(unsigned int)) {
2320 status = -EINVAL;
2321 break;
2322 }
 2323		/* Number of bits used by the GPU */
2324 if (adreno_support_64bit(adreno_dev))
2325 bitness = 48;
2326
2327 if (copy_to_user(value, &bitness,
2328 sizeof(unsigned int))) {
2329 status = -EFAULT;
2330 break;
2331 }
2332 status = 0;
2333 }
2334 break;
2335
2336 default:
2337 status = -EINVAL;
2338 }
2339
2340 return status;
2341}
2342
2343int adreno_set_constraint(struct kgsl_device *device,
2344 struct kgsl_context *context,
2345 struct kgsl_device_constraint *constraint)
2346{
2347 int status = 0;
2348
2349 switch (constraint->type) {
2350 case KGSL_CONSTRAINT_PWRLEVEL: {
2351 struct kgsl_device_constraint_pwrlevel pwr;
2352
2353 if (constraint->size != sizeof(pwr)) {
2354 status = -EINVAL;
2355 break;
2356 }
2357
2358 if (copy_from_user(&pwr,
2359 (void __user *)constraint->data,
2360 sizeof(pwr))) {
2361 status = -EFAULT;
2362 break;
2363 }
2364 if (pwr.level >= KGSL_CONSTRAINT_PWR_MAXLEVELS) {
2365 status = -EINVAL;
2366 break;
2367 }
2368
2369 context->pwr_constraint.type =
2370 KGSL_CONSTRAINT_PWRLEVEL;
2371 context->pwr_constraint.sub_type = pwr.level;
2372 trace_kgsl_user_pwrlevel_constraint(device,
2373 context->id,
2374 context->pwr_constraint.type,
2375 context->pwr_constraint.sub_type);
2376 }
2377 break;
2378 case KGSL_CONSTRAINT_NONE:
2379 if (context->pwr_constraint.type == KGSL_CONSTRAINT_PWRLEVEL)
2380 trace_kgsl_user_pwrlevel_constraint(device,
2381 context->id,
2382 KGSL_CONSTRAINT_NONE,
2383 context->pwr_constraint.sub_type);
2384 context->pwr_constraint.type = KGSL_CONSTRAINT_NONE;
2385 break;
2386
2387 default:
2388 status = -EINVAL;
2389 break;
2390 }
2391
2392 /* If a new constraint has been set for a context, cancel the old one */
2393 if ((status == 0) &&
2394 (context->id == device->pwrctrl.constraint.owner_id)) {
2395 trace_kgsl_constraint(device, device->pwrctrl.constraint.type,
2396 device->pwrctrl.active_pwrlevel, 0);
2397 device->pwrctrl.constraint.type = KGSL_CONSTRAINT_NONE;
2398 }
2399
2400 return status;
2401}
2402
2403static int adreno_setproperty(struct kgsl_device_private *dev_priv,
2404 unsigned int type,
2405 void __user *value,
2406 unsigned int sizebytes)
2407{
2408 int status = -EINVAL;
2409 struct kgsl_device *device = dev_priv->device;
2410 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2411
2412 switch (type) {
2413 case KGSL_PROP_PWRCTRL: {
2414 unsigned int enable;
2415
2416 if (sizebytes != sizeof(enable))
2417 break;
2418
2419 if (copy_from_user(&enable, value, sizeof(enable))) {
2420 status = -EFAULT;
2421 break;
2422 }
2423
2424 mutex_lock(&device->mutex);
2425
2426 if (enable) {
2427 device->pwrctrl.ctrl_flags = 0;
2428
2429 if (!kgsl_active_count_get(device)) {
2430 adreno_fault_detect_start(adreno_dev);
2431 kgsl_active_count_put(device);
2432 }
2433
2434 kgsl_pwrscale_enable(device);
2435 } else {
2436 kgsl_pwrctrl_change_state(device,
2437 KGSL_STATE_ACTIVE);
2438 device->pwrctrl.ctrl_flags = KGSL_PWR_ON;
2439 adreno_fault_detect_stop(adreno_dev);
2440 kgsl_pwrscale_disable(device, true);
2441 }
2442
2443 mutex_unlock(&device->mutex);
2444 status = 0;
2445 }
2446 break;
2447 case KGSL_PROP_PWR_CONSTRAINT: {
2448 struct kgsl_device_constraint constraint;
2449 struct kgsl_context *context;
2450
2451 if (sizebytes != sizeof(constraint))
2452 break;
2453
2454 if (copy_from_user(&constraint, value,
2455 sizeof(constraint))) {
2456 status = -EFAULT;
2457 break;
2458 }
2459
2460 context = kgsl_context_get_owner(dev_priv,
2461 constraint.context_id);
2462
2463 if (context == NULL)
2464 break;
2465
2466 status = adreno_set_constraint(device, context,
2467 &constraint);
2468
2469 kgsl_context_put(context);
2470 }
2471 break;
2472 default:
2473 break;
2474 }
2475
2476 return status;
2477}
2478
2479/*
2480 * adreno_irq_pending() - Checks if interrupt is generated by h/w
2481 * @adreno_dev: Pointer to device whose interrupts are checked
2482 *
2483 * Returns true if interrupts are pending from device else 0.
 2484 * Returns 1 if an interrupt is pending from the device, 0 otherwise.
2485inline unsigned int adreno_irq_pending(struct adreno_device *adreno_dev)
2486{
2487 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
2488 unsigned int status;
2489
2490 adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
2491
Deepak Kumar273c5712017-01-03 21:49:03 +05302492 /*
 2493	 * The IRQ handler clears the RBBM INT0 status register as soon as it
 2494	 * enters the ISR, before the interrupt is actually serviced, so we
 2495	 * cannot rely on the RBBM INT0 status alone.
2496 * Use pending_irq_refcnt along with RBBM INT0 to correctly
2497 * determine whether any IRQ is pending or not.
2498 */
2499 if ((status & gpudev->irq->mask) ||
2500 atomic_read(&adreno_dev->pending_irq_refcnt))
2501 return 1;
2502 else
2503 return 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002504}
2505
2506
2507/**
2508 * adreno_hw_isidle() - Check if the GPU core is idle
2509 * @adreno_dev: Pointer to the Adreno device structure for the GPU
2510 *
2511 * Return true if the RBBM status register for the GPU type indicates that the
2512 * hardware is idle
2513 */
2514bool adreno_hw_isidle(struct adreno_device *adreno_dev)
2515{
2516 const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
2517 unsigned int reg_rbbm_status;
Oleg Perelet62d5cec2017-03-27 16:14:52 -07002518 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
2519
2520 /* if hw driver implements idle check - use it */
2521 if (gpudev->hw_isidle)
2522 return gpudev->hw_isidle(adreno_dev);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002523
2524 if (adreno_is_a540(adreno_dev))
 2525		/*
 2526		 * Due to CRC idle throttling the GPU
 2527		 * idle hysteresis can take up to
 2528		 * 3usec to expire - account for it
2529 */
2530 udelay(5);
2531
2532 adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
2533 &reg_rbbm_status);
2534
2535 if (reg_rbbm_status & gpucore->busy_mask)
2536 return false;
2537
2538 /* Don't consider ourselves idle if there is an IRQ pending */
2539 if (adreno_irq_pending(adreno_dev))
2540 return false;
2541
2542 return true;
2543}
2544
2545/**
2546 * adreno_soft_reset() - Do a soft reset of the GPU hardware
2547 * @device: KGSL device to soft reset
2548 *
2549 * "soft reset" the GPU hardware - this is a fast path GPU reset
2550 * The GPU hardware is reset but we never pull power so we can skip
2551 * a lot of the standard adreno_stop/adreno_start sequence
2552 */
Shrenuj Bansald0fe7462017-05-08 16:11:19 -07002553int adreno_soft_reset(struct kgsl_device *device)
Shrenuj Bansala419c792016-10-20 14:05:11 -07002554{
2555 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2556 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
2557 int ret;
2558
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002559 if (gpudev->oob_set) {
Kyle Piefer42de1402017-09-15 11:28:47 -07002560 ret = gpudev->oob_set(adreno_dev, OOB_GPU_SET_MASK,
2561 OOB_GPU_CHECK_MASK,
2562 OOB_GPU_CLEAR_MASK);
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002563 if (ret)
2564 return ret;
2565 }
2566
Shrenuj Bansala419c792016-10-20 14:05:11 -07002567 kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
2568 adreno_set_active_ctxs_null(adreno_dev);
2569
2570 adreno_irqctrl(adreno_dev, 0);
2571
2572 adreno_clear_gpu_fault(adreno_dev);
 2573	/* since device is officially off now clear start bit */
2574 clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
2575
2576 /* save physical performance counter values before GPU soft reset */
2577 adreno_perfcounter_save(adreno_dev);
2578
2579 /* Reset the GPU */
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002580 if (gpudev->soft_reset)
2581 ret = gpudev->soft_reset(adreno_dev);
2582 else
2583 ret = _soft_reset(adreno_dev);
2584 if (ret) {
2585 if (gpudev->oob_clear)
Kyle Piefer42de1402017-09-15 11:28:47 -07002586 gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002587 return ret;
2588 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07002589
Abhilash Kumare0118252017-06-15 12:25:24 +05302590 /* Clear the busy_data stats - we're starting over from scratch */
2591 adreno_dev->busy_data.gpu_busy = 0;
Deepak Kumar84b9e032017-11-08 13:08:50 +05302592 adreno_dev->busy_data.bif_ram_cycles = 0;
2593 adreno_dev->busy_data.bif_ram_cycles_read_ch1 = 0;
2594 adreno_dev->busy_data.bif_ram_cycles_write_ch0 = 0;
2595 adreno_dev->busy_data.bif_ram_cycles_write_ch1 = 0;
2596 adreno_dev->busy_data.bif_starved_ram = 0;
2597 adreno_dev->busy_data.bif_starved_ram_ch1 = 0;
Abhilash Kumare0118252017-06-15 12:25:24 +05302598
Shrenuj Bansala419c792016-10-20 14:05:11 -07002599 /* Set the page table back to the default page table */
2600 adreno_ringbuffer_set_global(adreno_dev, 0);
2601 kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable);
2602
2603 _set_secvid(device);
2604
2605 /* Enable 64 bit gpu addr if feature is set */
2606 if (gpudev->enable_64bit &&
2607 adreno_support_64bit(adreno_dev))
2608 gpudev->enable_64bit(adreno_dev);
2609
2610
2611 /* Reinitialize the GPU */
2612 gpudev->start(adreno_dev);
2613
2614 /* Re-initialize the coresight registers if applicable */
2615 adreno_coresight_start(adreno_dev);
2616
2617 /* Enable IRQ */
2618 adreno_irqctrl(adreno_dev, 1);
2619
2620 /* stop all ringbuffers to cancel RB events */
2621 adreno_ringbuffer_stop(adreno_dev);
2622 /*
2623 * If we have offsets for the jump tables we can try to do a warm start,
2624 * otherwise do a full ringbuffer restart
2625 */
2626
2627 if (ADRENO_FEATURE(adreno_dev, ADRENO_WARM_START))
2628 ret = adreno_ringbuffer_start(adreno_dev, ADRENO_START_WARM);
2629 else
2630 ret = adreno_ringbuffer_start(adreno_dev, ADRENO_START_COLD);
2631 if (ret == 0) {
2632 device->reset_counter++;
2633 set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
2634 }
2635
2636 /* Restore physical performance counter values after soft reset */
2637 adreno_perfcounter_restore(adreno_dev);
2638
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002639 if (gpudev->oob_clear)
Kyle Piefer42de1402017-09-15 11:28:47 -07002640 gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
Shrenuj Bansal49d0e9f2017-05-08 16:10:24 -07002641
Shrenuj Bansala419c792016-10-20 14:05:11 -07002642 return ret;
2643}
2644
2645/*
2646 * adreno_isidle() - return true if the GPU hardware is idle
2647 * @device: Pointer to the KGSL device structure for the GPU
2648 *
2649 * Return true if the GPU hardware is idle and there are no commands pending in
2650 * the ringbuffer
2651 */
2652bool adreno_isidle(struct kgsl_device *device)
2653{
2654 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2655 struct adreno_ringbuffer *rb;
2656 int i;
2657
2658 if (!kgsl_state_is_awake(device))
2659 return true;
2660
2661 /*
 2662	 * wptr is updated when we add commands to the ringbuffer; add a barrier
 2663	 * to make sure the updated wptr is compared against rptr
2664 */
2665 smp_mb();
2666
2667 /*
 2668	 * The ringbuffers are truly idle when all of their read and write
 2669	 * pointers are equal
2670 */
2671
2672 FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
2673 if (!adreno_rb_empty(rb))
2674 return false;
2675 }
2676
2677 return adreno_hw_isidle(adreno_dev);
2678}
2679
Carter Cooper8567af02017-03-15 14:22:03 -06002680/* Print some key registers if a spin-for-idle times out */
2681void adreno_spin_idle_debug(struct adreno_device *adreno_dev,
2682 const char *str)
2683{
2684 struct kgsl_device *device = &adreno_dev->dev;
2685 unsigned int rptr, wptr;
2686 unsigned int status, status3, intstatus;
2687 unsigned int hwfault;
2688
2689 dev_err(device->dev, str);
2690
2691 adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);
2692 adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
2693
2694 adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS, &status);
2695 adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS3, &status3);
2696 adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &intstatus);
2697 adreno_readreg(adreno_dev, ADRENO_REG_CP_HW_FAULT, &hwfault);
2698
2699 dev_err(device->dev,
2700 "rb=%d pos=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
2701 adreno_dev->cur_rb->id, rptr, wptr, status, status3, intstatus);
2702
2703 dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
2704
Lynus Vaz43695aa2017-09-01 21:55:23 +05302705 kgsl_device_snapshot(device, NULL, adreno_gmu_gpu_fault(adreno_dev));
Carter Cooper8567af02017-03-15 14:22:03 -06002706}
2707
Shrenuj Bansala419c792016-10-20 14:05:11 -07002708/**
2709 * adreno_spin_idle() - Spin wait for the GPU to idle
2710 * @adreno_dev: Pointer to an adreno device
2711 * @timeout: milliseconds to wait before returning error
2712 *
2713 * Spin the CPU waiting for the RBBM status to return idle
2714 */
2715int adreno_spin_idle(struct adreno_device *adreno_dev, unsigned int timeout)
2716{
2717 unsigned long wait = jiffies + msecs_to_jiffies(timeout);
2718
2719 do {
2720 /*
2721 * If we fault, stop waiting and return an error. The dispatcher
2722 * will clean up the fault from the work queue, but we need to
2723 * make sure we don't block it by waiting for an idle that
2724 * will never come.
2725 */
2726
2727 if (adreno_gpu_fault(adreno_dev) != 0)
2728 return -EDEADLK;
2729
2730 if (adreno_isidle(KGSL_DEVICE(adreno_dev)))
2731 return 0;
2732
2733 } while (time_before(jiffies, wait));
2734
2735 /*
2736 * Under rare conditions, preemption can cause the while loop to exit
 2737	 * without checking if the GPU is idle. Check one last time before we
2738 * return failure.
2739 */
2740 if (adreno_gpu_fault(adreno_dev) != 0)
2741 return -EDEADLK;
2742
2743 if (adreno_isidle(KGSL_DEVICE(adreno_dev)))
2744 return 0;
2745
2746 return -ETIMEDOUT;
2747}
2748
2749/**
2750 * adreno_idle() - wait for the GPU hardware to go idle
2751 * @device: Pointer to the KGSL device structure for the GPU
2752 *
2753 * Wait up to ADRENO_IDLE_TIMEOUT milliseconds for the GPU hardware to go quiet.
2754 * Caller must hold the device mutex, and must not hold the dispatcher mutex.
2755 */
2756
2757int adreno_idle(struct kgsl_device *device)
2758{
2759 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2760 int ret;
2761
2762 /*
2763 * Make sure the device mutex is held so the dispatcher can't send any
2764 * more commands to the hardware
2765 */
2766
2767 if (WARN_ON(!mutex_is_locked(&device->mutex)))
2768 return -EDEADLK;
2769
2770 /* Check if we are already idle before idling dispatcher */
2771 if (adreno_isidle(device))
2772 return 0;
2773 /*
2774 * Wait for dispatcher to finish completing commands
2775 * already submitted
2776 */
2777 ret = adreno_dispatcher_idle(adreno_dev);
2778 if (ret)
2779 return ret;
2780
2781 return adreno_spin_idle(adreno_dev, ADRENO_IDLE_TIMEOUT);
2782}
2783
2784/**
2785 * adreno_drain() - Drain the dispatch queue
2786 * @device: Pointer to the KGSL device structure for the GPU
2787 *
2788 * Drain the dispatcher of existing drawobjs. This halts
2789 * additional commands from being issued until the gate is completed.
2790 */
2791static int adreno_drain(struct kgsl_device *device)
2792{
2793 reinit_completion(&device->halt_gate);
2794
2795 return 0;
2796}
2797
2798/* Caller must hold the device mutex. */
2799static int adreno_suspend_context(struct kgsl_device *device)
2800{
2801 /* process any profiling results that are available */
2802 adreno_profile_process_results(ADRENO_DEVICE(device));
2803
2804 /* Wait for the device to go idle */
2805 return adreno_idle(device);
2806}
2807
2808/**
2809 * adreno_read - General read function to read adreno device memory
2810 * @device - Pointer to the GPU device struct (for adreno device)
2811 * @base - Base address (kernel virtual) where the device memory is mapped
2812 * @offsetwords - Offset in words from the base address, of the memory that
2813 * is to be read
2814 * @value - Value read from the device memory
2815 * @mem_len - Length of the device memory mapped to the kernel
2816 */
2817static void adreno_read(struct kgsl_device *device, void __iomem *base,
2818 unsigned int offsetwords, unsigned int *value,
2819 unsigned int mem_len)
2820{
2821
2822 void __iomem *reg;
2823
2824 /* Make sure we're not reading from invalid memory */
2825 if (WARN(offsetwords * sizeof(uint32_t) >= mem_len,
2826 "Out of bounds register read: 0x%x/0x%x\n",
2827 offsetwords, mem_len >> 2))
2828 return;
2829
2830 reg = (base + (offsetwords << 2));
2831
2832 if (!in_interrupt())
2833 kgsl_pre_hwaccess(device);
2834
2835 *value = __raw_readl(reg);
2836 /*
2837 * ensure this read finishes before the next one.
2838 * i.e. act like normal readl()
2839 */
2840 rmb();
2841}
2842
2843/**
2844 * adreno_regread - Used to read adreno device registers
2845 * @offsetwords - Word (4 Bytes) offset to the register to be read
2846 * @value - Value read from device register
2847 */
2848static void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
2849 unsigned int *value)
2850{
2851 adreno_read(device, device->reg_virt, offsetwords, value,
2852 device->reg_len);
2853}
2854
2855/**
2856 * adreno_shadermem_regread - Used to read GPU (adreno) shader memory
2857 * @device - GPU device whose shader memory is to be read
2858 * @offsetwords - Offset in words, of the shader memory address to be read
2859 * @value - Pointer to where the read shader mem value is to be stored
2860 */
2861void adreno_shadermem_regread(struct kgsl_device *device,
2862 unsigned int offsetwords, unsigned int *value)
2863{
2864 adreno_read(device, device->shader_mem_virt, offsetwords, value,
2865 device->shader_mem_len);
2866}
2867
2868static void adreno_regwrite(struct kgsl_device *device,
2869 unsigned int offsetwords,
2870 unsigned int value)
2871{
2872 void __iomem *reg;
2873
2874 /* Make sure we're not writing to an invalid register */
2875 if (WARN(offsetwords * sizeof(uint32_t) >= device->reg_len,
2876 "Out of bounds register write: 0x%x/0x%x\n",
2877 offsetwords, device->reg_len >> 2))
2878 return;
2879
2880 if (!in_interrupt())
2881 kgsl_pre_hwaccess(device);
2882
2883 trace_kgsl_regwrite(device, offsetwords, value);
2884
2885 reg = (device->reg_virt + (offsetwords << 2));
2886
2887 /*
2888 * ensure previous writes post before this one,
2889 * i.e. act like normal writel()
2890 */
2891 wmb();
2892 __raw_writel(value, reg);
2893}
2894
Kyle Pieferb1027b02017-02-10 13:58:58 -08002895static void adreno_gmu_regwrite(struct kgsl_device *device,
2896 unsigned int offsetwords,
2897 unsigned int value)
2898{
2899 void __iomem *reg;
2900 struct gmu_device *gmu = &device->gmu;
2901
Kyle Pieferb1027b02017-02-10 13:58:58 -08002902 trace_kgsl_regwrite(device, offsetwords, value);
2903
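	/*
	 * The offset is given in the GPU register address space; subtract the
	 * gmu2gpu offset to index into the GMU's own register mapping.
	 */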
Kyle Pieferda6ef632017-06-29 13:18:51 -07002904 offsetwords -= gmu->gmu2gpu_offset;
Kyle Pieferb1027b02017-02-10 13:58:58 -08002905 reg = gmu->reg_virt + (offsetwords << 2);
2906
2907 /*
2908 * ensure previous writes post before this one,
2909 * i.e. act like normal writel()
2910 */
2911 wmb();
2912 __raw_writel(value, reg);
2913}
2914
2915static void adreno_gmu_regread(struct kgsl_device *device,
2916 unsigned int offsetwords,
2917 unsigned int *value)
2918{
2919 void __iomem *reg;
2920 struct gmu_device *gmu = &device->gmu;
2921
2922 offsetwords -= gmu->gmu2gpu_offset;
2923
2924 reg = gmu->reg_virt + (offsetwords << 2);
2925
2926 *value = __raw_readl(reg);
2927
2928 /*
2929 * ensure this read finishes before the next one.
2930 * i.e. act like normal readl()
2931 */
2932 rmb();
2933}
2934
Lynus Vaz9ed8cf92017-09-21 21:55:34 +05302935bool adreno_is_cx_dbgc_register(struct kgsl_device *device,
2936 unsigned int offsetwords)
2937{
2938 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2939
2940 return adreno_dev->cx_dbgc_virt &&
2941 (offsetwords >= (adreno_dev->cx_dbgc_base >> 2)) &&
2942 (offsetwords < (adreno_dev->cx_dbgc_base +
2943 adreno_dev->cx_dbgc_len) >> 2);
2944}
2945
2946void adreno_cx_dbgc_regread(struct kgsl_device *device,
2947 unsigned int offsetwords, unsigned int *value)
2948{
2949 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2950 unsigned int cx_dbgc_offset;
2951
2952 if (!adreno_is_cx_dbgc_register(device, offsetwords))
2953 return;
2954
2955 cx_dbgc_offset = (offsetwords << 2) - adreno_dev->cx_dbgc_base;
2956 *value = __raw_readl(adreno_dev->cx_dbgc_virt + cx_dbgc_offset);
2957
2958 /*
2959 * ensure this read finishes before the next one.
2960 * i.e. act like normal readl()
2961 */
2962 rmb();
2963}
2964
2965void adreno_cx_dbgc_regwrite(struct kgsl_device *device,
2966 unsigned int offsetwords, unsigned int value)
2967{
2968 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2969 unsigned int cx_dbgc_offset;
2970
2971 if (!adreno_is_cx_dbgc_register(device, offsetwords))
2972 return;
2973
2974 cx_dbgc_offset = (offsetwords << 2) - adreno_dev->cx_dbgc_base;
2975 trace_kgsl_regwrite(device, offsetwords, value);
2976
2977 /*
2978 * ensure previous writes post before this one,
2979 * i.e. act like normal writel()
2980 */
2981 wmb();
2982 __raw_writel(value, adreno_dev->cx_dbgc_virt + cx_dbgc_offset);
2983}
2984
Shrenuj Bansala419c792016-10-20 14:05:11 -07002985/**
2986 * adreno_waittimestamp - sleep while waiting for the specified timestamp
2987 * @device - pointer to a KGSL device structure
2988 * @context - pointer to the active kgsl context
2989 * @timestamp - GPU timestamp to wait for
2990 * @msecs - amount of time to wait (in milliseconds)
2991 *
2992 * Wait up to 'msecs' milliseconds for the specified timestamp to expire.
2993 */
2994static int adreno_waittimestamp(struct kgsl_device *device,
2995 struct kgsl_context *context,
2996 unsigned int timestamp,
2997 unsigned int msecs)
2998{
2999 int ret;
3000
3001 if (context == NULL) {
 3002		/* If they are using the deprecated path, complain once */
3003 dev_WARN_ONCE(device->dev, 1,
3004 "IOCTL_KGSL_DEVICE_WAITTIMESTAMP is deprecated\n");
3005 return -ENOTTY;
3006 }
3007
3008 /* Return -ENOENT if the context has been detached */
3009 if (kgsl_context_detached(context))
3010 return -ENOENT;
3011
3012 ret = adreno_drawctxt_wait(ADRENO_DEVICE(device), context,
3013 timestamp, msecs);
3014
3015 /* If the context got invalidated then return a specific error */
3016 if (kgsl_context_invalid(context))
3017 ret = -EDEADLK;
3018
3019 /*
3020 * Return -EPROTO if the device has faulted since the last time we
3021 * checked. Userspace uses this as a marker for performing post
3022 * fault activities
3023 */
3024
3025 if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv))
3026 ret = -EPROTO;
3027
3028 return ret;
3029}
3030
3031/**
3032 * __adreno_readtimestamp() - Reads the timestamp from memstore memory
3033 * @adreno_dev: Pointer to an adreno device
3034 * @index: Index into the memstore memory
3035 * @type: Type of timestamp to read
3036 * @timestamp: The out parameter where the timestamp is read
3037 */
3038static int __adreno_readtimestamp(struct adreno_device *adreno_dev, int index,
3039 int type, unsigned int *timestamp)
3040{
3041 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
3042 int status = 0;
3043
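	/*
	 * CONSUMED timestamps live in the soptimestamp slot and RETIRED
	 * timestamps in the eoptimestamp slot of the shared memstore, which
	 * the GPU updates as submissions move through the pipeline.
	 */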
3044 switch (type) {
3045 case KGSL_TIMESTAMP_CONSUMED:
3046 kgsl_sharedmem_readl(&device->memstore, timestamp,
3047 KGSL_MEMSTORE_OFFSET(index, soptimestamp));
3048 break;
3049 case KGSL_TIMESTAMP_RETIRED:
3050 kgsl_sharedmem_readl(&device->memstore, timestamp,
3051 KGSL_MEMSTORE_OFFSET(index, eoptimestamp));
3052 break;
3053 default:
3054 status = -EINVAL;
3055 *timestamp = 0;
3056 break;
3057 }
3058 return status;
3059}
3060
3061/**
3062 * adreno_rb_readtimestamp(): Return the value of given type of timestamp
3063 * for a RB
3064 * @adreno_dev: adreno device whose timestamp values are being queried
3065 * @priv: The object being queried for a timestamp (expected to be a rb pointer)
3066 * @type: The type of timestamp (one of 3) to be read
3067 * @timestamp: Pointer to where the read timestamp is to be written to
3068 *
3069 * CONSUMED and RETIRED type timestamps are sorted by id and are constantly
3070 * updated by the GPU through shared memstore memory. QUEUED type timestamps
 3071 * are read directly from the ringbuffer struct.
 3072 *
 3073 * The function returns 0 on success with the timestamp value written to
 3074 * *timestamp, or -EINVAL on a read error or invalid type, with *timestamp = 0.
3075 */
3076int adreno_rb_readtimestamp(struct adreno_device *adreno_dev,
3077 void *priv, enum kgsl_timestamp_type type,
3078 unsigned int *timestamp)
3079{
3080 int status = 0;
3081 struct adreno_ringbuffer *rb = priv;
3082
3083 if (type == KGSL_TIMESTAMP_QUEUED)
3084 *timestamp = rb->timestamp;
3085 else
3086 status = __adreno_readtimestamp(adreno_dev,
3087 rb->id + KGSL_MEMSTORE_MAX,
3088 type, timestamp);
3089
3090 return status;
3091}
3092
3093/**
3094 * adreno_readtimestamp(): Return the value of given type of timestamp
3095 * @device: GPU device whose timestamp values are being queried
3096 * @priv: The object being queried for a timestamp (expected to be a context)
3097 * @type: The type of timestamp (one of 3) to be read
3098 * @timestamp: Pointer to where the read timestamp is to be written to
3099 *
3100 * CONSUMED and RETIRED type timestamps are sorted by id and are constantly
3101 * updated by the GPU through shared memstore memory. QUEUED type timestamps
3102 * are read directly from context struct.
 3103 *
 3104 * The function returns 0 on success with the timestamp value written to
 3105 * *timestamp, or -EINVAL on a read error or invalid type, with *timestamp = 0.
3106 */
3107static int adreno_readtimestamp(struct kgsl_device *device,
3108 void *priv, enum kgsl_timestamp_type type,
3109 unsigned int *timestamp)
3110{
3111 int status = 0;
3112 struct kgsl_context *context = priv;
3113
3114 if (type == KGSL_TIMESTAMP_QUEUED) {
3115 struct adreno_context *ctxt = ADRENO_CONTEXT(context);
3116
3117 *timestamp = ctxt->timestamp;
3118 } else
3119 status = __adreno_readtimestamp(ADRENO_DEVICE(device),
3120 context->id, type, timestamp);
3121
3122 return status;
3123}
3124
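/*
 * Convert a tick count into microseconds: freq is expected in Hz, so
 * freq / 1000000 is the tick rate in ticks-per-microsecond and the division
 * truncates to whole microseconds.
 */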
3125static inline s64 adreno_ticks_to_us(u32 ticks, u32 freq)
3126{
3127 freq /= 1000000;
3128 return ticks / freq;
3129}
3130
3131/**
3132 * adreno_power_stats() - Reads the counters needed for freq decisions
3133 * @device: Pointer to device whose counters are read
3134 * @stats: Pointer to stats set that needs updating
3135 * Power: The caller is expected to be in a clock enabled state as this
3136 * function does reg reads
3137 */
3138static void adreno_power_stats(struct kgsl_device *device,
3139 struct kgsl_power_stats *stats)
3140{
3141 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
3142 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
3143 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
3144 struct adreno_busy_data *busy = &adreno_dev->busy_data;
3145 uint64_t adj = 0;
3146
3147 memset(stats, 0, sizeof(*stats));
3148
3149 /* Get the busy cycles counted since the counter was last reset */
3150 if (adreno_dev->perfctr_pwr_lo != 0) {
3151 uint64_t gpu_busy;
3152
3153 gpu_busy = counter_delta(device, adreno_dev->perfctr_pwr_lo,
3154 &busy->gpu_busy);
3155
3156 if (gpudev->read_throttling_counters) {
3157 adj = gpudev->read_throttling_counters(adreno_dev);
3158 gpu_busy += adj;
3159 }
3160
Deepak Kumar2c8ea992017-09-18 19:59:17 +05303161 if (adreno_is_a6xx(adreno_dev)) {
George Shen07550732017-06-01 11:54:16 -07003162 /* clock sourced from XO */
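			/*
			 * The XO reference is assumed here to tick at 19.2MHz,
			 * so usec = ticks / 19.2, computed below as
			 * (ticks * 10) / 192 to stay in integer arithmetic.
			 */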
Maria Yu2a69efa2017-09-26 16:29:58 +08003163 stats->busy_time = gpu_busy * 10;
3164 do_div(stats->busy_time, 192);
George Shen07550732017-06-01 11:54:16 -07003165 } else {
3166 /* clock sourced from GFX3D */
3167 stats->busy_time = adreno_ticks_to_us(gpu_busy,
3168 kgsl_pwrctrl_active_freq(pwr));
3169 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07003170 }
3171
3172 if (device->pwrctrl.bus_control) {
3173 uint64_t ram_cycles = 0, starved_ram = 0;
3174
3175 if (adreno_dev->ram_cycles_lo != 0)
3176 ram_cycles = counter_delta(device,
3177 adreno_dev->ram_cycles_lo,
Deepak Kumar84b9e032017-11-08 13:08:50 +05303178 &busy->bif_ram_cycles);
3179
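		/*
		 * On GBIF targets the extra per-channel read/write beat
		 * counters are folded into the same ram_cycles total that
		 * feeds the bus DCVS statistics below.
		 */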
3180 if (adreno_has_gbif(adreno_dev)) {
3181 if (adreno_dev->ram_cycles_lo_ch1_read != 0)
3182 ram_cycles += counter_delta(device,
3183 adreno_dev->ram_cycles_lo_ch1_read,
3184 &busy->bif_ram_cycles_read_ch1);
3185
3186 if (adreno_dev->ram_cycles_lo_ch0_write != 0)
3187 ram_cycles += counter_delta(device,
3188 adreno_dev->ram_cycles_lo_ch0_write,
3189 &busy->bif_ram_cycles_write_ch0);
3190
3191 if (adreno_dev->ram_cycles_lo_ch1_write != 0)
3192 ram_cycles += counter_delta(device,
3193 adreno_dev->ram_cycles_lo_ch1_write,
3194 &busy->bif_ram_cycles_write_ch1);
3195 }
Shrenuj Bansala419c792016-10-20 14:05:11 -07003196
3197 if (adreno_dev->starved_ram_lo != 0)
3198 starved_ram = counter_delta(device,
3199 adreno_dev->starved_ram_lo,
Deepak Kumar84b9e032017-11-08 13:08:50 +05303200 &busy->bif_starved_ram);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003201
Deepak Kumarc52781f2017-11-06 16:10:17 +05303202 if (adreno_has_gbif(adreno_dev)) {
3203 if (adreno_dev->starved_ram_lo_ch1 != 0)
3204 starved_ram += counter_delta(device,
3205 adreno_dev->starved_ram_lo_ch1,
Deepak Kumar84b9e032017-11-08 13:08:50 +05303206 &busy->bif_starved_ram_ch1);
Deepak Kumarc52781f2017-11-06 16:10:17 +05303207 }
3208
Shrenuj Bansala419c792016-10-20 14:05:11 -07003209 stats->ram_time = ram_cycles;
3210 stats->ram_wait = starved_ram;
3211 }
3212 if (adreno_dev->lm_threshold_count &&
3213 gpudev->count_throttles)
3214 gpudev->count_throttles(adreno_dev, adj);
3215}
3216
3217static unsigned int adreno_gpuid(struct kgsl_device *device,
3218 unsigned int *chipid)
3219{
3220 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
3221
3222 /*
3223 * Some applications need to know the chip ID too, so pass
3224 * that as a parameter
3225 */
3226
3227 if (chipid != NULL)
3228 *chipid = adreno_dev->chipid;
3229
3230 /*
3231 * Standard KGSL gpuid format:
3232 * top word is 0x0002 for 2D or 0x0003 for 3D
 3233	 * Bottom word is a core specific identifier
3234 */
3235
3236 return (0x0003 << 16) | ADRENO_GPUREV(adreno_dev);
3237}
3238
3239static int adreno_regulator_enable(struct kgsl_device *device)
3240{
3241 int ret = 0;
3242 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
3243 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
3244
3245 if (gpudev->regulator_enable &&
3246 !test_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
3247 &adreno_dev->priv)) {
3248 ret = gpudev->regulator_enable(adreno_dev);
3249 if (!ret)
3250 set_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
3251 &adreno_dev->priv);
3252 }
3253 return ret;
3254}
3255
3256static bool adreno_is_hw_collapsible(struct kgsl_device *device)
3257{
3258 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
3259 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
3260
3261 /*
 3262	 * Skip power collapse for the A304 if the power ctrl flag is set to
 3263	 * a non-zero value. Soft reset does not work on the A304, so power
 3264	 * collapse needs to be disabled to avoid it.
3265 */
3266 if (adreno_is_a304(adreno_dev) &&
3267 device->pwrctrl.ctrl_flags)
3268 return false;
3269
3270 return adreno_isidle(device) && (gpudev->is_sptp_idle ?
3271 gpudev->is_sptp_idle(adreno_dev) : true);
3272}
3273
3274static void adreno_regulator_disable(struct kgsl_device *device)
3275{
3276 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
3277 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
3278
3279 if (gpudev->regulator_disable &&
3280 test_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
3281 &adreno_dev->priv)) {
3282 gpudev->regulator_disable(adreno_dev);
3283 clear_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
3284 &adreno_dev->priv);
3285 }
3286}
3287
3288static void adreno_pwrlevel_change_settings(struct kgsl_device *device,
3289 unsigned int prelevel, unsigned int postlevel, bool post)
3290{
3291 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
3292 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
3293
3294 if (gpudev->pwrlevel_change_settings)
3295 gpudev->pwrlevel_change_settings(adreno_dev, prelevel,
3296 postlevel, post);
3297}
3298
3299static void adreno_clk_set_options(struct kgsl_device *device, const char *name,
Deepak Kumara309e0e2017-03-17 17:27:42 +05303300 struct clk *clk, bool on)
Shrenuj Bansala419c792016-10-20 14:05:11 -07003301{
3302 if (ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options)
3303 ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options(
Deepak Kumara309e0e2017-03-17 17:27:42 +05303304 ADRENO_DEVICE(device), name, clk, on);
Shrenuj Bansala419c792016-10-20 14:05:11 -07003305}
3306
3307static void adreno_iommu_sync(struct kgsl_device *device, bool sync)
3308{
3309 struct scm_desc desc = {0};
3310 int ret;
3311
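	/*
	 * Ask the secure world (via SCM) to start/stop MMU sync with the
	 * hypervisor around GPU regulator power-down; this path is only used
	 * when the ADRENO_QUIRK_IOMMU_SYNC quirk is set (see
	 * adreno_regulator_disable_poll() below) and kgsl_mmu_sync is held
	 * for the duration of the sync window.
	 */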
3312 if (sync == true) {
3313 mutex_lock(&kgsl_mmu_sync);
3314 desc.args[0] = true;
3315 desc.arginfo = SCM_ARGS(1);
3316 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_PWR, 0x8), &desc);
3317 if (ret)
3318 KGSL_DRV_ERR(device,
3319 "MMU sync with Hypervisor off %x\n", ret);
3320 } else {
3321 desc.args[0] = false;
3322 desc.arginfo = SCM_ARGS(1);
3323 scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_PWR, 0x8), &desc);
3324 mutex_unlock(&kgsl_mmu_sync);
3325 }
3326}
3327
3328static void _regulator_disable(struct kgsl_regulator *regulator, bool poll)
3329{
3330 unsigned long wait_time = jiffies + msecs_to_jiffies(200);
3331
3332 if (IS_ERR_OR_NULL(regulator->reg))
3333 return;
3334
3335 regulator_disable(regulator->reg);
3336
3337 if (poll == false)
3338 return;
3339
3340 while (!time_after(jiffies, wait_time)) {
3341 if (!regulator_is_enabled(regulator->reg))
3342 return;
3343 cpu_relax();
3344 }
3345
3346 KGSL_CORE_ERR("regulator '%s' still on after 200ms\n", regulator->name);
3347}
3348
3349static void adreno_regulator_disable_poll(struct kgsl_device *device)
3350{
3351 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
3352 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
3353 int i;
3354
3355 /* Fast path - hopefully we don't need this quirk */
3356 if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_IOMMU_SYNC)) {
3357 for (i = KGSL_MAX_REGULATORS - 1; i >= 0; i--)
3358 _regulator_disable(&pwr->regulators[i], false);
3359 return;
3360 }
3361
3362 adreno_iommu_sync(device, true);
3363
3364 for (i = 0; i < KGSL_MAX_REGULATORS; i++)
3365 _regulator_disable(&pwr->regulators[i], true);
3366
3367 adreno_iommu_sync(device, false);
3368}
3369
3370static void adreno_gpu_model(struct kgsl_device *device, char *str,
3371 size_t bufsz)
3372{
3373 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
3374
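	/*
	 * The chipid patch field is presumably zero-based, hence the +1 when
	 * building the human readable "vN" revision suffix below.
	 */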
3375 snprintf(str, bufsz, "Adreno%d%d%dv%d",
3376 ADRENO_CHIPID_CORE(adreno_dev->chipid),
3377 ADRENO_CHIPID_MAJOR(adreno_dev->chipid),
3378 ADRENO_CHIPID_MINOR(adreno_dev->chipid),
3379 ADRENO_CHIPID_PATCH(adreno_dev->chipid) + 1);
3380}
3381
3382static const struct kgsl_functable adreno_functable = {
3383 /* Mandatory functions */
3384 .regread = adreno_regread,
3385 .regwrite = adreno_regwrite,
Kyle Pieferb1027b02017-02-10 13:58:58 -08003386 .gmu_regread = adreno_gmu_regread,
3387 .gmu_regwrite = adreno_gmu_regwrite,
Shrenuj Bansala419c792016-10-20 14:05:11 -07003388 .idle = adreno_idle,
3389 .isidle = adreno_isidle,
3390 .suspend_context = adreno_suspend_context,
3391 .init = adreno_init,
3392 .start = adreno_start,
3393 .stop = adreno_stop,
3394 .getproperty = adreno_getproperty,
3395 .getproperty_compat = adreno_getproperty_compat,
3396 .waittimestamp = adreno_waittimestamp,
3397 .readtimestamp = adreno_readtimestamp,
3398 .queue_cmds = adreno_dispatcher_queue_cmds,
3399 .ioctl = adreno_ioctl,
3400 .compat_ioctl = adreno_compat_ioctl,
3401 .power_stats = adreno_power_stats,
3402 .gpuid = adreno_gpuid,
3403 .snapshot = adreno_snapshot,
3404 .irq_handler = adreno_irq_handler,
3405 .drain = adreno_drain,
3406 /* Optional functions */
Carter Cooperb88b7082017-09-14 09:03:26 -06003407 .snapshot_gmu = adreno_snapshot_gmu,
Shrenuj Bansala419c792016-10-20 14:05:11 -07003408 .drawctxt_create = adreno_drawctxt_create,
3409 .drawctxt_detach = adreno_drawctxt_detach,
3410 .drawctxt_destroy = adreno_drawctxt_destroy,
3411 .drawctxt_dump = adreno_drawctxt_dump,
3412 .setproperty = adreno_setproperty,
3413 .setproperty_compat = adreno_setproperty_compat,
3414 .drawctxt_sched = adreno_drawctxt_sched,
3415 .resume = adreno_dispatcher_start,
3416 .regulator_enable = adreno_regulator_enable,
3417 .is_hw_collapsible = adreno_is_hw_collapsible,
3418 .regulator_disable = adreno_regulator_disable,
3419 .pwrlevel_change_settings = adreno_pwrlevel_change_settings,
3420 .regulator_disable_poll = adreno_regulator_disable_poll,
3421 .clk_set_options = adreno_clk_set_options,
3422 .gpu_model = adreno_gpu_model,
Hareesh Gundua2fe6ec2017-03-06 14:53:36 +05303423 .stop_fault_timer = adreno_dispatcher_stop_fault_timer,
Shrenuj Bansala419c792016-10-20 14:05:11 -07003424};
3425
3426static struct platform_driver adreno_platform_driver = {
3427 .probe = adreno_probe,
3428 .remove = adreno_remove,
3429 .suspend = kgsl_suspend_driver,
3430 .resume = kgsl_resume_driver,
3431 .id_table = adreno_id_table,
3432 .driver = {
3433 .owner = THIS_MODULE,
3434 .name = DEVICE_3D_NAME,
3435 .pm = &kgsl_pm_ops,
3436 .of_match_table = adreno_match_table,
3437 }
3438};
3439
3440static const struct of_device_id busmon_match_table[] = {
3441 { .compatible = "qcom,kgsl-busmon", .data = &device_3d0 },
3442 {}
3443};
3444
3445static int adreno_busmon_probe(struct platform_device *pdev)
3446{
3447 struct kgsl_device *device;
3448 const struct of_device_id *pdid =
3449 of_match_device(busmon_match_table, &pdev->dev);
3450
3451 if (pdid == NULL)
3452 return -ENXIO;
3453
3454 device = (struct kgsl_device *)pdid->data;
3455 device->busmondev = &pdev->dev;
3456 dev_set_drvdata(device->busmondev, device);
3457
3458 return 0;
3459}
3460
3461static struct platform_driver kgsl_bus_platform_driver = {
3462 .probe = adreno_busmon_probe,
3463 .driver = {
3464 .owner = THIS_MODULE,
3465 .name = "kgsl-busmon",
3466 .of_match_table = busmon_match_table,
3467 }
3468};
3469
3470static int __init kgsl_3d_init(void)
3471{
3472 int ret;
3473
3474 ret = platform_driver_register(&kgsl_bus_platform_driver);
3475 if (ret)
3476 return ret;
3477
3478 ret = platform_driver_register(&adreno_platform_driver);
3479 if (ret)
3480 platform_driver_unregister(&kgsl_bus_platform_driver);
3481
3482 return ret;
3483}
3484
3485static void __exit kgsl_3d_exit(void)
3486{
3487 platform_driver_unregister(&adreno_platform_driver);
3488 platform_driver_unregister(&kgsl_bus_platform_driver);
3489}
3490
3491module_init(kgsl_3d_init);
3492module_exit(kgsl_3d_exit);
3493
3494MODULE_DESCRIPTION("3D Graphics driver");
3495MODULE_LICENSE("GPL v2");
3496MODULE_ALIAS("platform:kgsl_3d");