/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/pm_opp.h>
#include <linux/io.h>
#include <soc/qcom/cmd-db.h>

#include "kgsl_device.h"
#include "kgsl_gmu.h"
#include "kgsl_hfi.h"
#include "a6xx_reg.h"
#include "adreno.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl_gmu."

static bool nogmu;
module_param(nogmu, bool, 0444);
MODULE_PARM_DESC(nogmu, "Disable the GMU");

#define GMU_CONTEXT_USER	0
#define GMU_CONTEXT_KERNEL	1
#define GMU_KERNEL_ENTRIES	8

enum gmu_iommu_mem_type {
	GMU_CACHED_CODE,
	GMU_CACHED_DATA,
	GMU_NONCACHED_KERNEL,
	GMU_NONCACHED_USER
};

/*
 * GMU virtual memory mapping definitions
 */
struct gmu_vma {
	unsigned int noncached_ustart;
	unsigned int noncached_usize;
	unsigned int noncached_kstart;
	unsigned int noncached_ksize;
	unsigned int cached_dstart;
	unsigned int cached_dsize;
	unsigned int cached_cstart;
	unsigned int cached_csize;
	unsigned int image_start;
};

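/* Writing this bit in GMU_CM3_CFG raises an NMI to the GMU (see send_nmi_to_gmu()) */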
#define GMU_CM3_CFG_NONMASKINTR_SHIFT	9

struct gmu_iommu_context {
	const char *name;
	struct device *dev;
	struct iommu_domain *domain;
};

#define HFIMEM_SIZE SZ_16K

#define DUMPMEM_SIZE SZ_16K

/* Define target specific GMU VMA configurations */
static const struct gmu_vma vma = {
	/* Noncached user segment */
	0x80000000, SZ_1G,
	/* Noncached kernel segment */
	0x60000000, SZ_512M,
	/* Cached data segment */
	0x44000, (SZ_256K-SZ_16K),
	/* Cached code segment */
	0x0, (SZ_256K-SZ_16K),
	/* FW image */
	0x0,
};

struct gmu_iommu_context gmu_ctx[] = {
	[GMU_CONTEXT_USER] = { .name = "gmu_user" },
	[GMU_CONTEXT_KERNEL] = { .name = "gmu_kernel" }
};

/*
 * There are a few static memory buffers that are allocated and mapped at boot
 * time for the GMU to function. The buffers are permanent (not freed) after
 * GPU boot. The sizes of the buffers are constant and not expected to change.
 *
 * We define an array and a simple allocator to keep track of the currently
 * active SMMU entries of the GMU kernel mode context. Each entry is assigned
 * a unique address inside the GMU kernel mode address range. The addresses
 * are assigned sequentially and aligned to 1MB each.
 */
static struct gmu_memdesc gmu_kmem_entries[GMU_KERNEL_ENTRIES];
static unsigned long gmu_kmem_bitmap;

/*
 * kgsl_gmu_isenabled() - Check if there is a GMU and it is enabled
 * @device: Pointer to the KGSL device that owns the GMU
 *
 * Check if a GMU has been found and successfully probed. Also
 * check that the feature flag to use a GMU is enabled. Returns
 * true if both of these conditions are met, otherwise false.
 */
bool kgsl_gmu_isenabled(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!nogmu && gmu->pdev &&
			ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
		return true;
	return false;
}

static int _gmu_iommu_fault_handler(struct device *dev,
		unsigned long addr, int flags, const char *name)
{
	char *fault_type = "unknown";

	if (flags & IOMMU_FAULT_TRANSLATION)
		fault_type = "translation";
	else if (flags & IOMMU_FAULT_PERMISSION)
		fault_type = "permission";
	else if (flags & IOMMU_FAULT_EXTERNAL)
		fault_type = "external";
	else if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
		fault_type = "transaction stalled";

	dev_err(dev, "GMU fault addr = %lX, context=%s (%s %s fault)\n",
			addr, name,
			(flags & IOMMU_FAULT_WRITE) ? "write" : "read",
			fault_type);

	return 0;
}

static int gmu_kernel_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token)
{
	return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_kernel");
}

static int gmu_user_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token)
{
	return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_user");
}

static void free_gmu_mem(struct gmu_device *gmu,
		struct gmu_memdesc *md)
{
	/* Free GMU image memory */
	if (md->hostptr)
		dma_free_attrs(&gmu->pdev->dev, (size_t) md->size,
				(void *)md->hostptr, md->physaddr, 0);
	memset(md, 0, sizeof(*md));
}

static int alloc_and_map(struct gmu_device *gmu, unsigned int ctx_id,
		struct gmu_memdesc *md, unsigned int attrs)
{
	int ret;
	struct iommu_domain *domain;

	domain = gmu_ctx[ctx_id].domain;

	md->hostptr = dma_alloc_attrs(&gmu->pdev->dev, (size_t) md->size,
			&md->physaddr, GFP_KERNEL, 0);

	if (md->hostptr == NULL)
		return -ENOMEM;

	ret = iommu_map(domain, md->gmuaddr,
			md->physaddr, md->size,
			attrs);

	if (ret) {
		dev_err(&gmu->pdev->dev,
				"gmu map err: gaddr=0x%016llX, paddr=0x%pa\n",
				md->gmuaddr, &(md->physaddr));
		free_gmu_mem(gmu, md);
	}

	return ret;
}

/*
 * allocate_gmu_image() - allocate & map memory for the GMU FW image. The
 * size comes from the loaded firmware file and must be smaller than the
 * code cache size, otherwise the firmware may see degraded performance.
 * @gmu: Pointer to GMU device
 * @size: Requested allocation size
 */
int allocate_gmu_image(struct gmu_device *gmu, unsigned int size)
{
	struct gmu_memdesc *md = &gmu->fw_image;

	if (size > vma.cached_csize) {
		dev_err(&gmu->pdev->dev,
				"GMU firmware size too big: %d\n", size);
		return -EINVAL;
	}

	md->size = size;
	md->gmuaddr = vma.image_start;
	md->attr = GMU_CACHED_CODE;

	return alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, IOMMU_READ);
}

/*
 * allocate_gmu_kmem() - allocates and maps GMU kernel shared memory
 * @gmu: Pointer to GMU device
 * @size: Requested size
 * @attrs: IOMMU mapping attributes
 */
static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
		unsigned int size, unsigned int attrs)
{
	struct gmu_memdesc *md;
	int ret, entry_idx = find_first_zero_bit(
			&gmu_kmem_bitmap, GMU_KERNEL_ENTRIES);

	size = PAGE_ALIGN(size);

	if (size > SZ_1M || size == 0) {
		dev_err(&gmu->pdev->dev,
			"Requested %d bytes of GMU kernel memory, max=1MB\n",
			size);
		return ERR_PTR(-EINVAL);
	}

	if (entry_idx >= GMU_KERNEL_ENTRIES) {
		dev_err(&gmu->pdev->dev,
				"Ran out of GMU kernel mempool slots\n");
		return ERR_PTR(-EINVAL);
	}

	/* Allocate GMU virtual memory */
	md = &gmu_kmem_entries[entry_idx];
	md->gmuaddr = vma.noncached_kstart + (entry_idx * SZ_1M);
	set_bit(entry_idx, &gmu_kmem_bitmap);
	md->attr = GMU_NONCACHED_KERNEL;
	md->size = size;

	ret = alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, attrs);

	if (ret) {
		clear_bit(entry_idx, &gmu_kmem_bitmap);
		md->gmuaddr = 0;
		return ERR_PTR(ret);
	}

	return md;
}

static int gmu_iommu_cb_probe(struct gmu_device *gmu,
		struct gmu_iommu_context *ctx,
		struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);
	struct device *dev;
	int ret;

	dev = &pdev->dev;

	ctx->dev = dev;
	ctx->domain = iommu_domain_alloc(&platform_bus_type);
	if (ctx->domain == NULL) {
		dev_err(&gmu->pdev->dev, "gmu iommu fail to alloc %s domain\n",
			ctx->name);
		return -ENODEV;
	}

	ret = iommu_attach_device(ctx->domain, dev);
	if (ret) {
		dev_err(&gmu->pdev->dev, "gmu iommu fail to attach %s device\n",
			ctx->name);
		iommu_domain_free(ctx->domain);
	}

	return ret;
}

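/* IOMMU context banks used by the GMU, matched by their DT compatible strings */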
static struct {
	const char *compatible;
	int index;
	iommu_fault_handler_t hdlr;
} cbs[] = {
	{ "qcom,smmu-gmu-user-cb",
		GMU_CONTEXT_USER,
		gmu_user_fault_handler,
	},
	{ "qcom,smmu-gmu-kernel-cb",
		GMU_CONTEXT_KERNEL,
		gmu_kernel_fault_handler,
	},
};

/*
 * gmu_iommu_init() - probe IOMMU context banks used by GMU
 * and attach GMU device
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */
static int gmu_iommu_init(struct gmu_device *gmu, struct device_node *node)
{
	struct device_node *child;
	struct gmu_iommu_context *ctx = NULL;
	int ret, i;

	of_platform_populate(node, NULL, NULL, &gmu->pdev->dev);

	for (i = 0; i < ARRAY_SIZE(cbs); i++) {
		child = of_find_compatible_node(node, NULL, cbs[i].compatible);
		if (child) {
			ctx = &gmu_ctx[cbs[i].index];
			ret = gmu_iommu_cb_probe(gmu, ctx, child);
			if (ret)
				return ret;
			iommu_set_fault_handler(ctx->domain,
					cbs[i].hdlr, ctx);
		}
	}

	for (i = 0; i < ARRAY_SIZE(gmu_ctx); i++) {
		if (gmu_ctx[i].domain == NULL) {
			dev_err(&gmu->pdev->dev,
				"Missing GMU %s context bank node\n",
				gmu_ctx[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * gmu_kmem_close() - free all kernel memory allocated for GMU and detach GMU
 * from IOMMU context banks.
 * @gmu: Pointer to GMU device
 */
static void gmu_kmem_close(struct gmu_device *gmu)
{
	int i;
	struct gmu_memdesc *md = &gmu->fw_image;
	struct gmu_iommu_context *ctx = &gmu_ctx[GMU_CONTEXT_KERNEL];

	/* Free GMU image memory */
	free_gmu_mem(gmu, md);

	/* Unmap image memory */
	iommu_unmap(ctx->domain,
			gmu->fw_image.gmuaddr,
			gmu->fw_image.size);

	gmu->hfi_mem = NULL;
	gmu->dump_mem = NULL;

	/* Unmap all memories in GMU kernel memory pool */
	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
		struct gmu_memdesc *memptr = &gmu_kmem_entries[i];

		if (memptr->gmuaddr)
			iommu_unmap(ctx->domain, memptr->gmuaddr, memptr->size);
	}

	/* Free GMU shared kernel memory */
	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
		md = &gmu_kmem_entries[i];
		free_gmu_mem(gmu, md);
		clear_bit(i, &gmu_kmem_bitmap);
	}

	/* Detach the device from SMMU context bank */
	iommu_detach_device(ctx->domain, ctx->dev);

	/* free kernel mem context */
	iommu_domain_free(ctx->domain);
}

static void gmu_memory_close(struct gmu_device *gmu)
{
	gmu_kmem_close(gmu);
	/* Free user memory context */
	iommu_domain_free(gmu_ctx[GMU_CONTEXT_USER].domain);
}

/*
 * gmu_memory_probe() - probe GMU IOMMU context banks and allocate memory
 * to share with GMU in kernel mode.
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */
static int gmu_memory_probe(struct gmu_device *gmu, struct device_node *node)
{
	int ret;

	ret = gmu_iommu_init(gmu, node);
	if (ret)
		return ret;

	/* Allocates & maps memory for HFI */
	gmu->hfi_mem = allocate_gmu_kmem(gmu, HFIMEM_SIZE,
			(IOMMU_READ | IOMMU_WRITE));
	if (IS_ERR(gmu->hfi_mem)) {
		ret = PTR_ERR(gmu->hfi_mem);
		goto err_ret;
	}

	/* Allocates & maps GMU crash dump memory */
	gmu->dump_mem = allocate_gmu_kmem(gmu, DUMPMEM_SIZE,
			(IOMMU_READ | IOMMU_WRITE));
	if (IS_ERR(gmu->dump_mem)) {
		ret = PTR_ERR(gmu->dump_mem);
		goto err_ret;
	}

	return 0;
err_ret:
	gmu_memory_close(gmu);
	return ret;
}

/*
 * gmu_dcvs_set() - request GMU to change GPU frequency and/or bandwidth.
 * @gmu: Pointer to GMU device
 * @gpu_pwrlevel: index into the GPU DCVS table used by KGSL
 * @bus_level: index into the GPU bus table used by KGSL
 *
 * The function converts the GPU power level and bus level indices used by
 * KGSL into the indices used by the GMU/RPMh.
 */
int gmu_dcvs_set(struct gmu_device *gmu,
		unsigned int gpu_pwrlevel, unsigned int bus_level)
{
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int perf_idx = INVALID_DCVS_IDX, bw_idx = INVALID_DCVS_IDX;
	int ret = 0;

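	/*
	 * KGSL and the GMU order their power level tables in opposite
	 * directions (see gmu_probe(), which fills gpu_freqs[] in reverse),
	 * so mirror the KGSL index here.
	 */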
	if (gpu_pwrlevel < gmu->num_gpupwrlevels - 1)
		perf_idx = gmu->num_gpupwrlevels - gpu_pwrlevel - 1;

	if (bus_level < gmu->num_bwlevels && bus_level > 0)
		bw_idx = bus_level;

	if ((perf_idx == INVALID_DCVS_IDX) &&
		(bw_idx == INVALID_DCVS_IDX))
		return -EINVAL;

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev,
			GMU_DCVS_NOHFI, perf_idx, bw_idx);
	else if (test_bit(GMU_HFI_ON, &gmu->flags))
		ret = hfi_send_dcvs_vote(gmu, perf_idx, bw_idx, ACK_NONBLOCK);

	if (ret) {
		dev_err_ratelimited(&gmu->pdev->dev,
			"Failed to set GPU perf idx %d, bw idx %d\n",
			perf_idx, bw_idx);

		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
		adreno_dispatcher_schedule(device);
	}

	return ret;
}

struct rpmh_arc_vals {
	unsigned int num;
	uint16_t val[MAX_GX_LEVELS];
};

static const char gfx_res_id[] = "gfx.lvl";
static const char cx_res_id[] = "cx.lvl";
static const char mx_res_id[] = "mx.lvl";

enum rpmh_vote_type {
	GPU_ARC_VOTE = 0,
	GMU_ARC_VOTE,
	INVALID_ARC_VOTE,
};

static const char debug_strs[][8] = {
	[GPU_ARC_VOTE]  = "gpu",
	[GMU_ARC_VOTE]  = "gmu",
};

/*
 * rpmh_arc_cmds() - query the RPMh command database for the GX/CX/MX rail
 * VLVL tables. The table indices are used by the GMU to vote for rail
 * voltage.
 * @gmu: Pointer to GMU device
 * @arc: Pointer to RPMh rail controller (ARC) voltage table
 * @res_id: Pointer to an 8 char array that contains the rail name
 */
static int rpmh_arc_cmds(struct gmu_device *gmu,
		struct rpmh_arc_vals *arc, const char *res_id)
{
	unsigned int len;

	len = cmd_db_get_aux_data_len(res_id);
	if (len == 0)
		return -EINVAL;

	if (len > (MAX_GX_LEVELS << 1)) {
		dev_err(&gmu->pdev->dev,
			"gfx cmddb size %d larger than alloc buf %d of %s\n",
			len, (MAX_GX_LEVELS << 1), res_id);
		return -EINVAL;
	}

	cmd_db_get_aux_data(res_id, (uint8_t *)arc->val, len);

	/*
	 * cmd_db_get_aux_data() gives us a zero-padded table of
	 * size len that contains the arc values. To determine the
	 * number of arc values, we loop through the table and count
	 * them until we get to the end of the buffer or hit the
	 * zero padding.
	 */
	for (arc->num = 1; arc->num < (len >> 1); arc->num++) {
		if (arc->val[arc->num - 1] >= arc->val[arc->num])
			break;
	}

	return 0;
}

/*
 * setup_volt_dependency_tbl() - set up the GX->MX or CX->MX rail voltage
 * dependencies. The secondary rail voltage shall be equal to or higher
 * than the primary rail voltage. The VLVL table index is used by RPMh
 * for PMIC voltage setting.
 * @votes: Pointer to an ARC vote descriptor
 * @pri_rail: Pointer to the primary power rail VLVL table
 * @sec_rail: Pointer to the secondary/dependent power rail VLVL table
 * @vlvl: Pointer to the VLVL table being used by the GPU or GMU driver,
 *	a subset of the pri_rail VLVL table
 * @num_entries: Valid number of entries in the table pointed to by "vlvl"
 */
static int setup_volt_dependency_tbl(struct arc_vote_desc *votes,
		struct rpmh_arc_vals *pri_rail, struct rpmh_arc_vals *sec_rail,
		unsigned int *vlvl, unsigned int num_entries)
{
	int i, j, k;
	uint16_t cur_vlvl;
	bool found_match;

	/*
	 * i tracks the current KGSL GPU frequency table entry
	 * j tracks the secondary rail voltage table entry
	 * k tracks the primary rail voltage table entry
	 */
	for (i = 0; i < num_entries; i++) {
		found_match = false;

		/* Look for a primary rail voltage that matches a VLVL level */
		for (k = 0; k < pri_rail->num; k++) {
			if (pri_rail->val[k] == vlvl[i]) {
				votes[i].pri_idx = k;
				votes[i].vlvl = vlvl[i];
				cur_vlvl = vlvl[i];
				found_match = true;
				break;
			}
		}

		/* If we did not find a matching VLVL level then abort */
		if (!found_match)
			return -EINVAL;

		/*
		 * Look for a secondary rail index whose VLVL value
		 * is greater than or equal to the VLVL value of the
		 * corresponding index of the primary rail
		 */
		for (j = 0; j < sec_rail->num; j++) {
			if (sec_rail->val[j] >= cur_vlvl ||
					j + 1 == sec_rail->num) {
				votes[i].sec_idx = j;
				break;
			}
		}
	}
	return 0;
}

/*
 * rpmh_arc_votes_init() - initialize the RPMh votes needed for rail voltage
 * scaling by the GMU.
 * @gmu: Pointer to GMU device
 * @pri_rail: Pointer to the primary power rail VLVL table
 * @sec_rail: Pointer to the secondary/dependent power rail VLVL table
 * @type: The type of the primary rail, GPU or GMU
 */
static int rpmh_arc_votes_init(struct gmu_device *gmu,
		struct rpmh_arc_vals *pri_rail,
		struct rpmh_arc_vals *sec_rail,
		unsigned int type)
{
	struct device *dev;
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	unsigned int num_freqs;
	struct arc_vote_desc *votes;
	unsigned int vlvl_tbl[MAX_GX_LEVELS];
	unsigned int *freq_tbl;
	int i, ret;
	struct dev_pm_opp *opp;

	if (type == GPU_ARC_VOTE) {
		num_freqs = gmu->num_gpupwrlevels;
		votes = gmu->rpmh_votes.gx_votes;
		freq_tbl = gmu->gpu_freqs;
		dev = &device->pdev->dev;
	} else if (type == GMU_ARC_VOTE) {
		num_freqs = gmu->num_gmupwrlevels;
		votes = gmu->rpmh_votes.cx_votes;
		freq_tbl = gmu->gmu_freqs;
		dev = &gmu->pdev->dev;
	} else {
		return -EINVAL;
	}

	if (num_freqs > pri_rail->num) {
		dev_err(&gmu->pdev->dev,
			"%s defined more DCVS levels than RPMh can support\n",
			debug_strs[type]);
		return -EINVAL;
	}

	memset(vlvl_tbl, 0, sizeof(vlvl_tbl));
	for (i = 0; i < num_freqs; i++) {
		/* Hardcode VLVL for 0 because it is not registered in OPP */
		if (freq_tbl[i] == 0) {
			vlvl_tbl[i] = 0;
			continue;
		}

		/* Otherwise get the value from the OPP API */
		opp = dev_pm_opp_find_freq_exact(dev, freq_tbl[i], true);
		if (IS_ERR(opp)) {
			dev_err(&gmu->pdev->dev,
				"Failed to find opp freq %d of %s\n",
				freq_tbl[i], debug_strs[type]);
			return PTR_ERR(opp);
		}

		/* Values from OPP framework are offset by 1 */
		vlvl_tbl[i] = dev_pm_opp_get_voltage(opp) - 1;
	}

	ret = setup_volt_dependency_tbl(votes,
			pri_rail, sec_rail, vlvl_tbl, num_freqs);

	if (ret)
		dev_err(&gmu->pdev->dev, "%s rail volt failed to match DT freqs\n",
			debug_strs[type]);

	return ret;
}

/*
 * build_rpmh_bw_votes() - build TCS commands to vote for bandwidth.
 * Each command sets the frequency of a node along the path to DDR or CNOC.
 * @rpmh_vote: Pointer to the RPMh vote needed by the GMU to set BW via RPMh
 * @num_usecases: Number of BW use cases (or BW levels)
 * @handle: Provided by the bus driver. It contains the TCS command sets for
 * all BW use cases of a bus client.
 */
static void build_rpmh_bw_votes(struct gmu_bw_votes *rpmh_vote,
		unsigned int num_usecases, struct msm_bus_tcs_handle handle)
{
	struct msm_bus_tcs_usecase *tmp;
	int i, j;

	for (i = 0; i < num_usecases; i++) {
		tmp = &handle.usecases[i];
		for (j = 0; j < tmp->num_cmds; j++) {
			if (!i) {
				/*
				 * The wait bitmask and TCS command addresses
				 * are the same for all bandwidth use cases.
				 * To reduce the data exchanged between the
				 * driver and the GMU, only transfer the
				 * bitmask and command addresses of the first
				 * use case.
				 */
				rpmh_vote->cmds_per_bw_vote = tmp->num_cmds;
				rpmh_vote->cmds_wait_bitmask =
						tmp->cmds[j].complete ?
						rpmh_vote->cmds_wait_bitmask
						| BIT(i)
						: rpmh_vote->cmds_wait_bitmask
						& (~BIT(i));
				rpmh_vote->cmd_addrs[j] = tmp->cmds[j].addr;
			}
			rpmh_vote->cmd_data[i][j] = tmp->cmds[j].data;
		}
	}
}

/*
 * gmu_bus_vote_init - initialize the RPMh votes needed for bandwidth scaling
 * by the GMU.
 * @gmu: Pointer to GMU device
 * @pwr: Pointer to KGSL power controller
 */
static int gmu_bus_vote_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
{
	struct msm_bus_tcs_usecase *usecases;
	struct msm_bus_tcs_handle hdl;
	struct rpmh_votes_t *votes = &gmu->rpmh_votes;
	int ret;

	usecases = kcalloc(gmu->num_bwlevels, sizeof(*usecases), GFP_KERNEL);
	if (!usecases)
		return -ENOMEM;

	hdl.num_usecases = gmu->num_bwlevels;
	hdl.usecases = usecases;

	/*
	 * Query the TCS command set for each use case defined in the GPU
	 * b/w table
	 */
	ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->pcl);
	if (ret)
		goto out;

	build_rpmh_bw_votes(&votes->ddr_votes, gmu->num_bwlevels, hdl);

	/*
	 * Query the CNOC TCS command set for each use case defined in the
	 * CNOC b/w table
	 */
	ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->ccl);
	if (ret)
		goto out;

	build_rpmh_bw_votes(&votes->cnoc_votes, gmu->num_cnocbwlevels, hdl);

out:
	kfree(usecases);

	return ret;
}

static int gmu_rpmh_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
{
	struct rpmh_arc_vals gfx_arc, cx_arc, mx_arc;
	int ret;

	/* Populate BW vote table */
	ret = gmu_bus_vote_init(gmu, pwr);
	if (ret)
		return ret;

	/* Populate GPU and GMU frequency vote table */
	ret = rpmh_arc_cmds(gmu, &gfx_arc, gfx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_cmds(gmu, &cx_arc, cx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_cmds(gmu, &mx_arc, mx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_votes_init(gmu, &gfx_arc, &mx_arc, GPU_ARC_VOTE);
	if (ret)
		return ret;

	return rpmh_arc_votes_init(gmu, &cx_arc, &mx_arc, GMU_ARC_VOTE);
}

static void send_nmi_to_gmu(struct adreno_device *adreno_dev)
{
	/* Mask so there's no interrupt caused by NMI */
	adreno_write_gmureg(adreno_dev,
			ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);

	/* Make sure the interrupt is masked before causing it */
	wmb();
	adreno_write_gmureg(adreno_dev,
			ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
	adreno_write_gmureg(adreno_dev,
			ADRENO_REG_GMU_CM3_CFG,
			(1 << GMU_CM3_CFG_NONMASKINTR_SHIFT));

	/* Make sure the NMI is invoked before we proceed */
	wmb();
}

static irqreturn_t gmu_irq_handler(int irq, void *data)
{
	struct gmu_device *gmu = data;
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int mask, status = 0;

	adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS, &status);
	adreno_write_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, status);

	/* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD WAKEUP interrupts */
	if (status & GMU_INT_WDOG_BITE) {
		/* Temporarily mask the watchdog interrupt to prevent a storm */
		adreno_read_gmureg(adreno_dev,
				ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK, &mask);
		adreno_write_gmureg(adreno_dev,
				ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
				(mask | GMU_INT_WDOG_BITE));

		send_nmi_to_gmu(adreno_dev);
		/*
		 * There is sufficient delay for the GMU to have finished
		 * handling the NMI before snapshot is taken, as the fault
		 * worker is scheduled below.
		 */

		dev_err_ratelimited(&gmu->pdev->dev,
				"GMU watchdog expired interrupt received\n");
		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
		adreno_dispatcher_schedule(device);
	}
	if (status & GMU_INT_HOST_AHB_BUS_ERR)
		dev_err_ratelimited(&gmu->pdev->dev,
				"AHB bus error interrupt received\n");
	if (status & GMU_INT_FENCE_ERR) {
		unsigned int fence_status;

		adreno_read_gmureg(ADRENO_DEVICE(device),
				ADRENO_REG_GMU_AHB_FENCE_STATUS, &fence_status);
		dev_err_ratelimited(&gmu->pdev->dev,
				"FENCE error interrupt received %x\n", fence_status);
	}

	if (status & ~GMU_AO_INT_MASK)
		dev_err_ratelimited(&gmu->pdev->dev,
				"Unhandled GMU interrupts 0x%lx\n",
				status & ~GMU_AO_INT_MASK);

	return IRQ_HANDLED;
}

static irqreturn_t hfi_irq_handler(int irq, void *data)
{
	struct kgsl_hfi *hfi = data;
	struct gmu_device *gmu = container_of(hfi, struct gmu_device, hfi);
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int status = 0;

	adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status);
	adreno_write_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status);

	if (status & HFI_IRQ_MSGQ_MASK)
		tasklet_hi_schedule(&hfi->tasklet);
	if (status & HFI_IRQ_CM3_FAULT_MASK) {
		dev_err_ratelimited(&gmu->pdev->dev,
				"GMU CM3 fault interrupt received\n");
		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
		adreno_dispatcher_schedule(device);
	}
	if (status & ~HFI_IRQ_MASK)
		dev_err_ratelimited(&gmu->pdev->dev,
				"Unhandled HFI interrupts 0x%lx\n",
				status & ~HFI_IRQ_MASK);

	return IRQ_HANDLED;
}

static int gmu_pwrlevel_probe(struct gmu_device *gmu, struct device_node *node)
{
	struct device_node *pwrlevel_node, *child;

	pwrlevel_node = of_find_node_by_name(node, "qcom,gmu-pwrlevels");

	if (pwrlevel_node == NULL) {
		dev_err(&gmu->pdev->dev, "Unable to find 'qcom,gmu-pwrlevels'\n");
		return -EINVAL;
	}

	gmu->num_gmupwrlevels = 0;

	for_each_child_of_node(pwrlevel_node, child) {
		unsigned int index;

		if (of_property_read_u32(child, "reg", &index))
			return -EINVAL;

		if (index >= MAX_CX_LEVELS) {
			dev_err(&gmu->pdev->dev, "gmu pwrlevel %d is out of range\n",
				index);
			continue;
		}

		if (index >= gmu->num_gmupwrlevels)
			gmu->num_gmupwrlevels = index + 1;

		if (of_property_read_u32(child, "qcom,gmu-freq",
				&gmu->gmu_freqs[index]))
			return -EINVAL;
	}

	return 0;
}

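/*
 * gmu_reg_probe() - map one of the GMU register regions described in DT
 * @gmu: Pointer to GMU device
 * @name: Name of the IORESOURCE_MEM resource to map
 * @is_gmu: true to map the GMU CSR region, false to map the PDC region
 */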
static int gmu_reg_probe(struct gmu_device *gmu, const char *name, bool is_gmu)
{
	struct resource *res;

	res = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM, name);
	if (res == NULL) {
		dev_err(&gmu->pdev->dev,
			"platform_get_resource %s failed\n", name);
		return -EINVAL;
	}

	if (res->start == 0 || resource_size(res) == 0) {
		dev_err(&gmu->pdev->dev,
			"dev %d %s invalid register region\n",
			gmu->pdev->dev.id, name);
		return -EINVAL;
	}

	if (is_gmu) {
		gmu->reg_phys = res->start;
		gmu->reg_len = resource_size(res);
		gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
				resource_size(res));

		if (gmu->reg_virt == NULL) {
			dev_err(&gmu->pdev->dev, "GMU regs ioremap failed\n");
			return -ENODEV;
		}

	} else {
		gmu->pdc_reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
				resource_size(res));
		if (gmu->pdc_reg_virt == NULL) {
			dev_err(&gmu->pdev->dev, "PDC regs ioremap failed\n");
			return -ENODEV;
		}
	}

	return 0;
}

static int gmu_clocks_probe(struct gmu_device *gmu, struct device_node *node)
{
	const char *cname;
	struct property *prop;
	struct clk *c;
	int i = 0;

	of_property_for_each_string(node, "clock-names", prop, cname) {
		c = devm_clk_get(&gmu->pdev->dev, cname);

		if (IS_ERR(c)) {
			dev_err(&gmu->pdev->dev,
				"dt: Couldn't get GMU clock: %s\n", cname);
			return PTR_ERR(c);
		}

		if (i >= MAX_GMU_CLKS) {
			dev_err(&gmu->pdev->dev,
				"dt: too many GMU clocks defined\n");
			return -EINVAL;
		}

		gmu->clks[i++] = c;
	}

	return 0;
}

static int gmu_gpu_bw_probe(struct gmu_device *gmu)
{
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct msm_bus_scale_pdata *bus_scale_table;

	bus_scale_table = msm_bus_cl_get_pdata(device->pdev);
	if (bus_scale_table == NULL) {
		dev_err(&gmu->pdev->dev, "dt: cannot get bus table\n");
		return -ENODEV;
	}

	gmu->num_bwlevels = bus_scale_table->num_usecases;
	gmu->pcl = msm_bus_scale_register_client(bus_scale_table);
	if (!gmu->pcl) {
		dev_err(&gmu->pdev->dev, "dt: cannot register bus client\n");
		return -ENODEV;
	}

	return 0;
}

static int gmu_cnoc_bw_probe(struct gmu_device *gmu)
{
	struct msm_bus_scale_pdata *cnoc_table;

	cnoc_table = msm_bus_cl_get_pdata(gmu->pdev);
	if (cnoc_table == NULL) {
		dev_err(&gmu->pdev->dev, "dt: cannot get cnoc table\n");
		return -ENODEV;
	}

	gmu->num_cnocbwlevels = cnoc_table->num_usecases;
	gmu->ccl = msm_bus_scale_register_client(cnoc_table);
	if (!gmu->ccl) {
		dev_err(&gmu->pdev->dev, "dt: cannot register cnoc client\n");
		return -ENODEV;
	}

	return 0;
}

static int gmu_regulators_probe(struct gmu_device *gmu,
		struct device_node *node)
{
	const char *name;
	struct property *prop;
	struct device *dev = &gmu->pdev->dev;
	int ret = 0;

	of_property_for_each_string(node, "regulator-names", prop, name) {
		if (!strcmp(name, "vddcx")) {
			gmu->cx_gdsc = devm_regulator_get(dev, name);
			if (IS_ERR(gmu->cx_gdsc)) {
				ret = PTR_ERR(gmu->cx_gdsc);
				dev_err(dev, "dt: GMU couldn't get CX gdsc\n");
				gmu->cx_gdsc = NULL;
				return ret;
			}
		} else if (!strcmp(name, "vdd")) {
			gmu->gx_gdsc = devm_regulator_get(dev, name);
			if (IS_ERR(gmu->gx_gdsc)) {
				ret = PTR_ERR(gmu->gx_gdsc);
				dev_err(dev, "dt: GMU couldn't get GX gdsc\n");
				gmu->gx_gdsc = NULL;
				return ret;
			}
		} else {
			dev_err(dev, "dt: Unknown GMU regulator: %s\n", name);
			return -ENODEV;
		}
	}

	return 0;
}

static int gmu_irq_probe(struct gmu_device *gmu)
{
	int ret;
	struct kgsl_hfi *hfi = &gmu->hfi;

	hfi->hfi_interrupt_num = platform_get_irq_byname(gmu->pdev,
			"kgsl_hfi_irq");
	ret = devm_request_irq(&gmu->pdev->dev,
			hfi->hfi_interrupt_num,
			hfi_irq_handler, IRQF_TRIGGER_HIGH,
			"HFI", hfi);
	if (ret) {
		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
			hfi->hfi_interrupt_num, ret);
		return ret;
	}

	gmu->gmu_interrupt_num = platform_get_irq_byname(gmu->pdev,
			"kgsl_gmu_irq");
	ret = devm_request_irq(&gmu->pdev->dev,
			gmu->gmu_interrupt_num,
			gmu_irq_handler, IRQF_TRIGGER_HIGH,
			"GMU", gmu);
	if (ret)
		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
			gmu->gmu_interrupt_num, ret);

	return ret;
}

static void gmu_irq_enable(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;

	/* Clear any pending IRQs before unmasking on GMU */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
			0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
			0xFFFFFFFF);

	/* Unmask needed IRQs on GMU */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
			(unsigned int) ~HFI_IRQ_MASK);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
			(unsigned int) ~GMU_AO_INT_MASK);

	/* Enable all IRQs on host */
	enable_irq(hfi->hfi_interrupt_num);
	enable_irq(gmu->gmu_interrupt_num);
}

static void gmu_irq_disable(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;

	/* Disable all IRQs on host */
	disable_irq(gmu->gmu_interrupt_num);
	disable_irq(hfi->hfi_interrupt_num);

	/* Mask all IRQs on GMU */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
			0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
			0xFFFFFFFF);

	/* Clear any pending IRQs before disabling */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
			0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
			0xFFFFFFFF);
}

/* Do not access any GMU registers in GMU probe function */
int gmu_probe(struct kgsl_device *device)
{
	struct device_node *node;
	struct gmu_device *gmu = &device->gmu;
	struct gmu_memdesc *mem_addr = NULL;
	struct kgsl_hfi *hfi = &gmu->hfi;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i = 0, ret = -ENXIO;

	node = of_find_compatible_node(device->pdev->dev.of_node,
			NULL, "qcom,gpu-gmu");

	if (node == NULL)
		return ret;

	device->gmu.pdev = of_find_device_by_node(node);

	/* Set up GMU regulators */
	ret = gmu_regulators_probe(gmu, node);
	if (ret)
		goto error;

	/* Set up GMU clocks */
	ret = gmu_clocks_probe(gmu, node);
	if (ret)
		goto error;

	/* Set up GMU IOMMU and shared memory with GMU */
	ret = gmu_memory_probe(&device->gmu, node);
	if (ret)
		goto error;
	mem_addr = gmu->hfi_mem;

	/* Map and reserve the GMU CSRs */
	ret = gmu_reg_probe(gmu, "kgsl_gmu_reg", true);
	if (ret)
		goto error;

	ret = gmu_reg_probe(gmu, "kgsl_gmu_pdc_reg", false);
	if (ret)
		goto error;

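	/* Offset (in dwords) from the GPU register base to the GMU registers */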
	gmu->gmu2gpu_offset = (gmu->reg_phys - device->reg_phys) >> 2;

	/* Initialize HFI and GMU interrupts */
	ret = gmu_irq_probe(gmu);
	if (ret)
		goto error;

	/* Don't enable GMU interrupts until GMU started */
	/* We cannot use gmu_irq_disable because it writes registers */
	disable_irq(gmu->gmu_interrupt_num);
	disable_irq(hfi->hfi_interrupt_num);

	tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long)gmu);
	INIT_LIST_HEAD(&hfi->msglist);
	spin_lock_init(&hfi->msglock);

	/* Retrieves GMU/GPU power level configurations */
	ret = gmu_pwrlevel_probe(gmu, node);
	if (ret)
		goto error;

	gmu->num_gpupwrlevels = pwr->num_pwrlevels;

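	/*
	 * Copy KGSL's GPU power level table into gpu_freqs[] in reverse
	 * order for the GMU (gmu_dcvs_set() mirrors the index the same way).
	 */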
	for (i = 0; i < gmu->num_gpupwrlevels; i++) {
		int j = gmu->num_gpupwrlevels - 1 - i;

		gmu->gpu_freqs[i] = pwr->pwrlevels[j].gpu_freq;
	}

	/* Initializes GPU b/w levels configuration */
	ret = gmu_gpu_bw_probe(gmu);
	if (ret)
		goto error;

	/* Initialize GMU CNOC b/w levels configuration */
	ret = gmu_cnoc_bw_probe(gmu);
	if (ret)
		goto error;

	/* Populates RPMh configurations */
	ret = gmu_rpmh_init(gmu, pwr);
	if (ret)
		goto error;

	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);

	/* Set up GMU idle states */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT))
		gmu->idle_level = GPU_HW_MIN_VOLT;
	else if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP))
		gmu->idle_level = GPU_HW_NAP;
	else if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
		gmu->idle_level = GPU_HW_IFPC;
	else if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
		gmu->idle_level = GPU_HW_SPTP_PC;
	else
		gmu->idle_level = GPU_HW_ACTIVE;

	/* disable LM during boot time */
	clear_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag);
	return 0;

error:
	gmu_remove(device);
	return ret;
}

static int gmu_enable_clks(struct gmu_device *gmu)
{
	int ret, j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
		return -EINVAL;

	ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
	if (ret) {
		dev_err(&gmu->pdev->dev, "fail to set default GMU clk freq %d\n",
				gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
		return ret;
	}

	while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
		ret = clk_prepare_enable(gmu->clks[j]);
		if (ret) {
			dev_err(&gmu->pdev->dev,
					"fail to enable gpucc clk idx %d\n",
					j);
			return ret;
		}
		j++;
	}

	set_bit(GMU_CLK_ON, &gmu->flags);
	return 0;
}

static int gmu_disable_clks(struct gmu_device *gmu)
{
	int j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
		return 0;

	while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
		clk_disable_unprepare(gmu->clks[j]);
		j++;
	}

	clear_bit(GMU_CLK_ON, &gmu->flags);
	return 0;
}

static int gmu_enable_gdsc(struct gmu_device *gmu)
{
	int ret;

	if (IS_ERR_OR_NULL(gmu->cx_gdsc))
		return 0;

	ret = regulator_enable(gmu->cx_gdsc);
	if (ret)
		dev_err(&gmu->pdev->dev,
			"Failed to enable GMU CX gdsc, error %d\n", ret);

	return ret;
}

#define CX_GDSC_TIMEOUT	5000	/* ms */
static int gmu_disable_gdsc(struct gmu_device *gmu)
{
	int ret;
	unsigned long t;

	if (IS_ERR_OR_NULL(gmu->cx_gdsc))
		return 0;

	ret = regulator_disable(gmu->cx_gdsc);
	if (ret) {
		dev_err(&gmu->pdev->dev,
			"Failed to disable GMU CX gdsc, error %d\n", ret);
		return ret;
	}

	/*
	 * After the GX GDSC is turned off, the CX GDSC must also turn off.
	 * Voting it off from the GPU driver alone cannot guarantee that,
	 * so poll with a 5s timeout to make sure.
	 */
	t = jiffies + msecs_to_jiffies(CX_GDSC_TIMEOUT);
	do {
		if (!regulator_is_enabled(gmu->cx_gdsc))
			return 0;
		usleep_range(10, 100);
	} while (!(time_after(jiffies, t)));

	if (!regulator_is_enabled(gmu->cx_gdsc))
		return 0;

	dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout");
	return -ETIMEDOUT;
}

int gmu_suspend(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;

	if (!test_bit(GMU_CLK_ON, &gmu->flags))
		return 0;

	/* Pending messages in all queues are abandoned */
	hfi_stop(gmu);
	clear_bit(GMU_HFI_ON, &gmu->flags);
	gmu_irq_disable(device);

	if (gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_SUSPEND, 0, 0))
		return -EINVAL;

	gmu_disable_clks(gmu);
	gmu_disable_gdsc(gmu);
	dev_err(&gmu->pdev->dev, "Suspended GMU\n");
	return 0;
}

void gmu_snapshot(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;

	send_nmi_to_gmu(adreno_dev);
	/* Wait for the NMI to be handled */
	udelay(100);
	kgsl_device_snapshot(device, NULL, true);

	adreno_write_gmureg(adreno_dev,
			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, 0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev,
			ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
			(unsigned int) ~HFI_IRQ_MASK);

	gmu->fault_count++;
}

static void gmu_change_gpu_pwrlevel(struct kgsl_device *device,
		unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	unsigned int old_level = pwr->active_pwrlevel;

	/*
	 * Update the level according to any thermal,
	 * max/min, or power constraints.
	 */
	new_level = kgsl_pwrctrl_adjust_pwrlevel(device, new_level);

	/*
	 * If thermal cycling is required and the new level hits the
	 * thermal limit, kick off the cycling.
	 */
	kgsl_pwrctrl_set_thermal_cycle(device, new_level);

	pwr->active_pwrlevel = new_level;
	pwr->previous_pwrlevel = old_level;

	/* Request adjusted DCVS level */
	kgsl_clk_set_rate(device, pwr->active_pwrlevel);
}

/* To be called to power on both GPU and GMU */
int gmu_start(struct kgsl_device *device)
{
	int ret = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;
	unsigned int boot_state = GMU_WARM_BOOT;

	switch (device->state) {
	case KGSL_STATE_INIT:
	case KGSL_STATE_SUSPEND:
		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
		gmu_enable_gdsc(gmu);
		gmu_enable_clks(gmu);
		gmu_irq_enable(device);

		/* Vote for 300MHz DDR for GMU to init */
		ret = msm_bus_scale_client_update_request(gmu->pcl,
				pwr->pwrlevels[pwr->default_pwrlevel].bus_freq);
		if (ret)
			dev_err(&gmu->pdev->dev,
				"Failed to allocate gmu b/w: %d\n", ret);

		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
				GMU_COLD_BOOT, 0);
		if (ret)
			goto error_gmu;

		ret = hfi_start(gmu, GMU_COLD_BOOT);
		if (ret)
			goto error_gmu;

		/* Request default DCVS level */
		gmu_change_gpu_pwrlevel(device, pwr->default_pwrlevel);
		msm_bus_scale_client_update_request(gmu->pcl, 0);
		break;

	case KGSL_STATE_SLUMBER:
		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
		gmu_enable_gdsc(gmu);
		gmu_enable_clks(gmu);
		gmu_irq_enable(device);

		/*
		 * If unrecovered is set that means last
		 * wakeup from SLUMBER state failed. Use GMU
		 * and HFI boot state as COLD as this is a
		 * boot after RESET.
		 */
		if (gmu->unrecovered)
			boot_state = GMU_COLD_BOOT;

		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
				boot_state, 0);
		if (ret)
			goto error_gmu;

		ret = hfi_start(gmu, boot_state);
		if (ret)
			goto error_gmu;

		gmu_change_gpu_pwrlevel(device, pwr->default_pwrlevel);
		break;

	case KGSL_STATE_RESET:
		if (test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv) ||
				test_bit(GMU_FAULT, &gmu->flags)) {
			gmu_suspend(device);
			gmu_enable_gdsc(gmu);
			gmu_enable_clks(gmu);
			gmu_irq_enable(device);

			ret = gpudev->rpmh_gpu_pwrctrl(
				adreno_dev, GMU_FW_START, GMU_COLD_BOOT, 0);
			if (ret)
				goto error_gmu;

			ret = hfi_start(gmu, GMU_COLD_BOOT);
			if (ret)
				goto error_gmu;

			/* Send DCVS level prior to reset */
			gmu_change_gpu_pwrlevel(device,
				pwr->default_pwrlevel);
		} else {
			/* GMU fast boot */
			hfi_stop(gmu);

			ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
					GMU_COLD_BOOT, 0);
			if (ret)
				goto error_gmu;

			ret = hfi_start(gmu, GMU_WARM_BOOT);
			if (ret)
				goto error_gmu;
		}
		break;
	default:
		break;
	}

	/* Clear unrecovered as GMU start is successful */
	gmu->unrecovered = false;
	return ret;

error_gmu:
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
		gpudev->oob_clear(adreno_dev,
				OOB_BOOT_SLUMBER_CLEAR_MASK);
	gmu_snapshot(device);
	return ret;
}

/* Caller shall ensure GPU is ready for SLUMBER */
void gmu_stop(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int ret = 0;

	if (!test_bit(GMU_CLK_ON, &gmu->flags))
		return;

	/* Wait for the lowest idle level we requested */
	if (gpudev->wait_for_lowest_idle &&
			gpudev->wait_for_lowest_idle(adreno_dev))
		goto error;

	ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
	if (ret)
		goto error;

	if (gpudev->wait_for_gmu_idle &&
			gpudev->wait_for_gmu_idle(adreno_dev))
		goto error;

	/* Pending messages in all queues are abandoned */
	hfi_stop(gmu);
	clear_bit(GMU_HFI_ON, &gmu->flags);
	gmu_irq_disable(device);

	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
	gmu_disable_clks(gmu);
	gmu_disable_gdsc(gmu);

	msm_bus_scale_client_update_request(gmu->pcl, 0);
	return;

error:
	/*
	 * The power controller will change state to SLUMBER anyway.
	 * Set the GMU_FAULT flag to indicate to the power controller
	 * that hang recovery is needed to power on the GPU.
	 */
	set_bit(GMU_FAULT, &gmu->flags);
	dev_err(&gmu->pdev->dev, "Failed to stop GMU\n");
	gmu_snapshot(device);
}

void gmu_remove(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;
	int i = 0;

	if (!device->gmu.pdev)
		return;

	tasklet_kill(&hfi->tasklet);

	gmu_stop(device);
	gmu_irq_disable(device);

	while ((i < MAX_GMU_CLKS) && gmu->clks[i]) {
		gmu->clks[i] = NULL;
		i++;
	}

	if (gmu->gmu_interrupt_num) {
		devm_free_irq(&gmu->pdev->dev,
				gmu->gmu_interrupt_num, gmu);
		gmu->gmu_interrupt_num = 0;
	}

	if (hfi->hfi_interrupt_num) {
		devm_free_irq(&gmu->pdev->dev,
				hfi->hfi_interrupt_num, hfi);
		hfi->hfi_interrupt_num = 0;
	}

	if (gmu->ccl) {
		msm_bus_scale_unregister_client(gmu->ccl);
		gmu->ccl = 0;
	}

	if (gmu->pcl) {
		msm_bus_scale_unregister_client(gmu->pcl);
		gmu->pcl = 0;
	}

	if (gmu->pdc_reg_virt) {
		devm_iounmap(&gmu->pdev->dev, gmu->pdc_reg_virt);
		gmu->pdc_reg_virt = NULL;
	}

	if (gmu->reg_virt) {
		devm_iounmap(&gmu->pdev->dev, gmu->reg_virt);
		gmu->reg_virt = NULL;
	}

	if (gmu->hfi_mem || gmu->dump_mem)
		gmu_memory_close(&device->gmu);

	for (i = 0; i < MAX_GMU_CLKS; i++) {
		if (gmu->clks[i]) {
			devm_clk_put(&gmu->pdev->dev, gmu->clks[i]);
			gmu->clks[i] = NULL;
		}
	}

	if (gmu->gx_gdsc) {
		devm_regulator_put(gmu->gx_gdsc);
		gmu->gx_gdsc = NULL;
	}

	if (gmu->cx_gdsc) {
		devm_regulator_put(gmu->cx_gdsc);
		gmu->cx_gdsc = NULL;
	}

	device->gmu.pdev = NULL;
}

/*
 * adreno_gmu_fenced_write() - write to a fenced GMU register
 * @adreno_dev: Pointer to the Adreno device that owns the GMU
 * @offset: 32bit register enum that is to be written
 * @val: The value to be written to the register
 * @fence_mask: The value to poll the fence status register
 *
 * Check the WRITEDROPPED0/1 bit in the FENCE_STATUS register to check if
 * the write to the fenced register went through. If it didn't then we retry
 * the write until it goes through or we time out.
 */
int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
		enum adreno_regs offset, unsigned int val,
		unsigned int fence_mask)
{
	unsigned int status, i;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int reg_offset = gpudev->reg_offsets->offsets[offset];

	adreno_writereg(adreno_dev, offset, val);

	if (!kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
		return 0;

	for (i = 0; i < GMU_LONG_WAKEUP_RETRY_LIMIT; i++) {
		adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_AHB_FENCE_STATUS,
			&status);

		/*
		 * If !writedropped0/1, then the write to fenced register
		 * was successful
		 */
		if (!(status & fence_mask))
			return 0;
		/* Wait a small amount of time before trying again */
		udelay(GMU_WAKEUP_DELAY_US);

		/* Try to write the fenced register again */
		adreno_writereg(adreno_dev, offset, val);

		if (i == GMU_SHORT_WAKEUP_RETRY_LIMIT)
			dev_err(adreno_dev->dev.dev,
				"Waited %d usecs to write fenced register 0x%x. Continuing to wait...\n",
				(GMU_SHORT_WAKEUP_RETRY_LIMIT *
				GMU_WAKEUP_DELAY_US),
				reg_offset);
	}

	dev_err(adreno_dev->dev.dev,
		"Timed out waiting %d usecs to write fenced register 0x%x\n",
		GMU_LONG_WAKEUP_RETRY_LIMIT * GMU_WAKEUP_DELAY_US,
		reg_offset);

	return -ETIMEDOUT;
}
Harshdeep Dhatt8f78d5f2017-11-01 14:24:36 -06001717}