/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/pm_opp.h>
#include <linux/io.h>
#include <soc/qcom/cmd-db.h>

#include "kgsl_device.h"
#include "kgsl_gmu.h"
#include "kgsl_hfi.h"
#include "a6xx_reg.h"
#include "adreno.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl_gmu."

static bool nogmu;
module_param(nogmu, bool, 0444);
MODULE_PARM_DESC(nogmu, "Disable the GMU");

#define GMU_CONTEXT_USER	0
#define GMU_CONTEXT_KERNEL	1
#define GMU_KERNEL_ENTRIES	8

enum gmu_iommu_mem_type {
	GMU_CACHED_CODE,
	GMU_CACHED_DATA,
	GMU_NONCACHED_KERNEL,
	GMU_NONCACHED_USER
};

/*
 * GMU virtual memory mapping definitions
 */
struct gmu_vma {
	unsigned int noncached_ustart;
	unsigned int noncached_usize;
	unsigned int noncached_kstart;
	unsigned int noncached_ksize;
	unsigned int cached_dstart;
	unsigned int cached_dsize;
	unsigned int cached_cstart;
	unsigned int cached_csize;
	unsigned int image_start;
};

struct gmu_iommu_context {
	const char *name;
	struct device *dev;
	struct iommu_domain *domain;
};

#define HFIMEM_SIZE SZ_16K

#define DUMPMEM_SIZE SZ_16K

/* Define target specific GMU VMA configurations */
static const struct gmu_vma vma = {
	/* Noncached user segment */
	0x80000000, SZ_1G,
	/* Noncached kernel segment */
	0x60000000, SZ_512M,
	/* Cached data segment */
	0x44000, (SZ_256K - SZ_16K),
	/* Cached code segment */
	0x0, (SZ_256K - SZ_16K),
	/* FW image */
	0x0,
};

struct gmu_iommu_context gmu_ctx[] = {
	[GMU_CONTEXT_USER] = { .name = "gmu_user" },
	[GMU_CONTEXT_KERNEL] = { .name = "gmu_kernel" }
};

/*
 * There are a few static memory buffers that are allocated and mapped at boot
 * time for the GMU to function. The buffers are permanent (not freed) after
 * GPU boot. The size of the buffers is constant and not expected to change.
 *
 * We define an array and a simple allocator to keep track of the currently
 * active SMMU entries of the GMU kernel mode context. Each entry is assigned
 * a unique address inside the GMU kernel mode address range. The addresses
 * are assigned sequentially and aligned to 1MB each.
 */
static struct gmu_memdesc gmu_kmem_entries[GMU_KERNEL_ENTRIES];
static unsigned long gmu_kmem_bitmap;

/*
 * kgsl_gmu_isenabled() - Check if there is a GMU and it is enabled
 * @device: Pointer to the KGSL device that owns the GMU
 *
 * Check if a GMU has been found and successfully probed. Also
 * check that the feature flag to use a GMU is enabled. Returns
 * true if both of these conditions are met, otherwise false.
 */
bool kgsl_gmu_isenabled(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!nogmu && gmu->pdev &&
			ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
		return true;
	return false;
}

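/*
 * _gmu_iommu_fault_handler() - Common GMU IOMMU fault reporting helper
 * @dev: Pointer to the faulting context bank device
 * @addr: Faulting GMU virtual address
 * @flags: IOMMU fault flags describing the access
 * @name: Name of the context bank that faulted
 *
 * Log the fault type and address; used by both GMU context banks.
 */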
static int _gmu_iommu_fault_handler(struct device *dev,
		unsigned long addr, int flags, const char *name)
{
	char *fault_type = "unknown";

	if (flags & IOMMU_FAULT_TRANSLATION)
		fault_type = "translation";
	else if (flags & IOMMU_FAULT_PERMISSION)
		fault_type = "permission";

	dev_err(dev, "GMU fault addr = %lX, context=%s (%s %s fault)\n",
			addr, name,
			(flags & IOMMU_FAULT_WRITE) ? "write" : "read",
			fault_type);

	return 0;
}

static int gmu_kernel_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token)
{
	return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_kernel");
}

static int gmu_user_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token)
{
	return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_user");
}

static void free_gmu_mem(struct gmu_device *gmu,
		struct gmu_memdesc *md)
{
	/* Free GMU image memory */
	if (md->hostptr)
		dma_free_attrs(&gmu->pdev->dev, (size_t) md->size,
				(void *)md->hostptr, md->physaddr, 0);
	memset(md, 0, sizeof(*md));
}

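/*
 * alloc_and_map() - Allocate DMA memory for a GMU memory descriptor and map
 * it into the requested GMU IOMMU context bank at the descriptor's GMU
 * virtual address.
 * @gmu: Pointer to GMU device
 * @ctx_id: Index of the GMU IOMMU context (user or kernel)
 * @md: Pointer to the GMU memory descriptor to allocate and map
 * @attrs: IOMMU mapping attributes
 */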
static int alloc_and_map(struct gmu_device *gmu, unsigned int ctx_id,
		struct gmu_memdesc *md, unsigned int attrs)
{
	int ret;
	struct iommu_domain *domain;

	domain = gmu_ctx[ctx_id].domain;

	md->hostptr = dma_alloc_attrs(&gmu->pdev->dev, (size_t) md->size,
			&md->physaddr, GFP_KERNEL, 0);

	if (md->hostptr == NULL)
		return -ENOMEM;

	ret = iommu_map(domain, md->gmuaddr,
			md->physaddr, md->size,
			attrs);

	if (ret) {
		dev_err(&gmu->pdev->dev,
				"gmu map err: gaddr=0x%016llX, paddr=0x%pa\n",
				md->gmuaddr, &(md->physaddr));
		free_gmu_mem(gmu, md);
	}

	return ret;
}

/*
 * allocate_gmu_image() - allocate and map memory for the GMU FW image. The
 * size comes from the loaded firmware file and must be less than the code
 * cache size, otherwise the FW may experience performance issues.
 * @gmu: Pointer to GMU device
 * @size: Requested allocation size
 */
int allocate_gmu_image(struct gmu_device *gmu, unsigned int size)
{
	struct gmu_memdesc *md = &gmu->fw_image;

	if (size > vma.cached_csize) {
		dev_err(&gmu->pdev->dev,
				"GMU firmware size too big: %d\n", size);
		return -EINVAL;
	}

	md->size = size;
	md->gmuaddr = vma.image_start;
	md->attr = GMU_CACHED_CODE;

	return alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, IOMMU_READ);
}

/*
 * allocate_gmu_kmem() - allocates and maps GMU kernel shared memory
 * @gmu: Pointer to GMU device
 * @size: Requested size
 * @attrs: IOMMU mapping attributes
 */
static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
		unsigned int size, unsigned int attrs)
{
	struct gmu_memdesc *md;
	int ret, entry_idx = find_first_zero_bit(
			&gmu_kmem_bitmap, GMU_KERNEL_ENTRIES);

	size = PAGE_ALIGN(size);

	if (size > SZ_1M || size == 0) {
		dev_err(&gmu->pdev->dev,
			"Requested %d bytes of GMU kernel memory, max=1MB\n",
			size);
		return ERR_PTR(-EINVAL);
	}

	if (entry_idx >= GMU_KERNEL_ENTRIES) {
		dev_err(&gmu->pdev->dev,
				"Ran out of GMU kernel mempool slots\n");
		return ERR_PTR(-EINVAL);
	}

	/* Allocate GMU virtual memory */
	md = &gmu_kmem_entries[entry_idx];
	md->gmuaddr = vma.noncached_kstart + (entry_idx * SZ_1M);
	set_bit(entry_idx, &gmu_kmem_bitmap);
	md->attr = GMU_NONCACHED_KERNEL;
	md->size = size;

	ret = alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, attrs);

	if (ret) {
		clear_bit(entry_idx, &gmu_kmem_bitmap);
		md->gmuaddr = 0;
		return ERR_PTR(ret);
	}

	return md;
}

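/*
 * gmu_iommu_cb_probe() - Find the platform device for a GMU IOMMU context
 * bank node, allocate an IOMMU domain for it and attach the device.
 * @gmu: Pointer to GMU device
 * @ctx: Pointer to the GMU IOMMU context being probed
 * @node: Device tree node of the context bank
 */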
static int gmu_iommu_cb_probe(struct gmu_device *gmu,
		struct gmu_iommu_context *ctx,
		struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);
	struct device *dev;
	int ret;

	dev = &pdev->dev;

	ctx->dev = dev;
	ctx->domain = iommu_domain_alloc(&platform_bus_type);
	if (ctx->domain == NULL) {
		dev_err(&gmu->pdev->dev, "gmu iommu fail to alloc %s domain\n",
			ctx->name);
		return -ENODEV;
	}

	ret = iommu_attach_device(ctx->domain, dev);
	if (ret) {
		dev_err(&gmu->pdev->dev, "gmu iommu fail to attach %s device\n",
			ctx->name);
		iommu_domain_free(ctx->domain);
	}

	return ret;
}

static struct {
	const char *compatible;
	int index;
	iommu_fault_handler_t hdlr;
} cbs[] = {
	{ "qcom,smmu-gmu-user-cb",
		GMU_CONTEXT_USER,
		gmu_user_fault_handler,
	},
	{ "qcom,smmu-gmu-kernel-cb",
		GMU_CONTEXT_KERNEL,
		gmu_kernel_fault_handler,
	},
};

/*
 * gmu_iommu_init() - probe IOMMU context banks used by GMU
 * and attach GMU device
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */
int gmu_iommu_init(struct gmu_device *gmu, struct device_node *node)
{
	struct device_node *child;
	struct gmu_iommu_context *ctx = NULL;
	int ret, i;

	of_platform_populate(node, NULL, NULL, &gmu->pdev->dev);

	for (i = 0; i < ARRAY_SIZE(cbs); i++) {
		child = of_find_compatible_node(node, NULL, cbs[i].compatible);
		if (child) {
			ctx = &gmu_ctx[cbs[i].index];
			ret = gmu_iommu_cb_probe(gmu, ctx, child);
			if (ret)
				return ret;
			iommu_set_fault_handler(ctx->domain,
					cbs[i].hdlr, ctx);
		}
	}

	for (i = 0; i < ARRAY_SIZE(gmu_ctx); i++) {
		if (gmu_ctx[i].domain == NULL) {
			dev_err(&gmu->pdev->dev,
				"Missing GMU %s context bank node\n",
				gmu_ctx[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * gmu_kmem_close() - free all kernel memory allocated for GMU and detach GMU
 * from IOMMU context banks.
 * @gmu: Pointer to GMU device
 */
void gmu_kmem_close(struct gmu_device *gmu)
{
	int i;
	struct gmu_memdesc *md = &gmu->fw_image;
	struct gmu_iommu_context *ctx = &gmu_ctx[GMU_CONTEXT_KERNEL];

	/* Free GMU image memory */
	free_gmu_mem(gmu, md);

	/* Unmap image memory */
	iommu_unmap(ctx->domain,
			gmu->fw_image.gmuaddr,
			gmu->fw_image.size);

	gmu->hfi_mem = NULL;
	gmu->dump_mem = NULL;

	/* Unmap all memories in GMU kernel memory pool */
	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
		struct gmu_memdesc *memptr = &gmu_kmem_entries[i];

		if (memptr->gmuaddr)
			iommu_unmap(ctx->domain, memptr->gmuaddr, memptr->size);
	}

	/* Free GMU shared kernel memory */
	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
		md = &gmu_kmem_entries[i];
		free_gmu_mem(gmu, md);
		clear_bit(i, &gmu_kmem_bitmap);
	}

	/* Detach the device from SMMU context bank */
	iommu_detach_device(ctx->domain, ctx->dev);

	/* Free kernel mem context */
	iommu_domain_free(ctx->domain);
}

void gmu_memory_close(struct gmu_device *gmu)
{
	gmu_kmem_close(gmu);
	/* Free user memory context */
	iommu_domain_free(gmu_ctx[GMU_CONTEXT_USER].domain);
}

/*
 * gmu_memory_probe() - probe GMU IOMMU context banks and allocate memory
 * to share with GMU in kernel mode.
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */
int gmu_memory_probe(struct gmu_device *gmu, struct device_node *node)
{
	int ret;

	ret = gmu_iommu_init(gmu, node);
	if (ret)
		return ret;

	/* Allocates & maps memory for HFI */
	gmu->hfi_mem = allocate_gmu_kmem(gmu, HFIMEM_SIZE,
			(IOMMU_READ | IOMMU_WRITE));
	if (IS_ERR(gmu->hfi_mem)) {
		ret = PTR_ERR(gmu->hfi_mem);
		goto err_ret;
	}

	/* Allocates & maps GMU crash dump memory */
	gmu->dump_mem = allocate_gmu_kmem(gmu, DUMPMEM_SIZE,
			(IOMMU_READ | IOMMU_WRITE));
	if (IS_ERR(gmu->dump_mem)) {
		ret = PTR_ERR(gmu->dump_mem);
		goto err_ret;
	}

	return 0;
err_ret:
	gmu_memory_close(gmu);
	return ret;
}

/*
 * gmu_dcvs_set() - request GMU to change GPU frequency and/or bandwidth.
 * @gmu: Pointer to GMU device
 * @gpu_pwrlevel: index to GPU DCVS table used by KGSL
 * @bus_level: index to GPU bus table used by KGSL
 *
 * The function converts the GPU power level and bus level indices used by
 * KGSL to the indices used by GMU/RPMh.
 */
int gmu_dcvs_set(struct gmu_device *gmu,
		unsigned int gpu_pwrlevel, unsigned int bus_level)
{
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int perf_idx = INVALID_DCVS_IDX, bw_idx = INVALID_DCVS_IDX;
	int ret;

	if (gpu_pwrlevel < gmu->num_gpupwrlevels - 1)
		perf_idx = gmu->num_gpupwrlevels - gpu_pwrlevel - 1;

	if (bus_level < gmu->num_bwlevels && bus_level > 0)
		bw_idx = bus_level;

	if ((perf_idx == INVALID_DCVS_IDX) &&
		(bw_idx == INVALID_DCVS_IDX))
		return -EINVAL;

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev,
			GMU_DCVS_NOHFI, perf_idx, bw_idx);

		if (ret) {
			dev_err_ratelimited(&gmu->pdev->dev,
				"Failed to set GPU perf idx %d, bw idx %d\n",
				perf_idx, bw_idx);

			adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
			adreno_dispatcher_schedule(device);
		}

		return ret;
	}

	return hfi_send_dcvs_vote(gmu, perf_idx, bw_idx, ACK_NONBLOCK);
}

struct rpmh_arc_vals {
	unsigned int num;
	uint16_t val[MAX_GX_LEVELS];
};

static const char gfx_res_id[] = "gfx.lvl";
static const char cx_res_id[] = "cx.lvl";
static const char mx_res_id[] = "mx.lvl";

enum rpmh_vote_type {
	GPU_ARC_VOTE = 0,
	GMU_ARC_VOTE,
	INVALID_ARC_VOTE,
};

static const char debug_strs[][8] = {
	[GPU_ARC_VOTE] = "gpu",
	[GMU_ARC_VOTE] = "gmu",
};

/*
 * rpmh_arc_cmds() - query the RPMh command database for the GX/CX/MX rail
 * VLVL tables. The table indices are used by the GMU to vote for rail
 * voltages.
 * @gmu: Pointer to GMU device
 * @arc: Pointer to RPMh rail controller (ARC) voltage table
 * @res_id: Pointer to 8 char array that contains rail name
 */
static int rpmh_arc_cmds(struct gmu_device *gmu,
		struct rpmh_arc_vals *arc, const char *res_id)
{
	unsigned int len;

	len = cmd_db_get_aux_data_len(res_id);
	if (len == 0)
		return -EINVAL;

	if (len > (MAX_GX_LEVELS << 1)) {
		dev_err(&gmu->pdev->dev,
			"gfx cmddb size %d larger than alloc buf %d of %s\n",
			len, (MAX_GX_LEVELS << 1), res_id);
		return -EINVAL;
	}

	cmd_db_get_aux_data(res_id, (uint8_t *)arc->val, len);

	/*
	 * cmd_db_get_aux_data() gives us a zero-padded table of
	 * size len that contains the arc values. To determine the
	 * number of arc values, we loop through the table and count
	 * them until we get to the end of the buffer or hit the
	 * zero padding.
	 */
	for (arc->num = 1; arc->num < (len >> 1); arc->num++) {
		if (arc->val[arc->num - 1] >= arc->val[arc->num])
			break;
	}

	return 0;
}

/*
 * setup_volt_dependency_tbl() - set up GX->MX or CX->MX rail voltage
 * dependencies. The second rail voltage shall be equal to or higher than
 * the primary rail voltage. The VLVL table index is used by RPMh for PMIC
 * voltage setting.
 * @votes: Pointer to a ARC vote descriptor
 * @pri_rail: Pointer to primary power rail VLVL table
 * @sec_rail: Pointer to second/dependent power rail VLVL table
 * @vlvl: Pointer to VLVL table being used by GPU or GMU driver, a subset
 *	of pri_rail VLVL table
 * @num_entries: Valid number of entries in table pointed by "vlvl" parameter
 */
static int setup_volt_dependency_tbl(struct arc_vote_desc *votes,
		struct rpmh_arc_vals *pri_rail, struct rpmh_arc_vals *sec_rail,
		unsigned int *vlvl, unsigned int num_entries)
{
	int i, j, k;
	uint16_t cur_vlvl;
	bool found_match;

	/* i tracks current KGSL GPU frequency table entry
	 * j tracks second rail voltage table entry
	 * k tracks primary rail voltage table entry
	 */
	for (i = 0; i < num_entries; i++) {
		found_match = false;

		/* Look for a primary rail voltage that matches a VLVL level */
		for (k = 0; k < pri_rail->num; k++) {
			if (pri_rail->val[k] == vlvl[i]) {
				votes[i].pri_idx = k;
				votes[i].vlvl = vlvl[i];
				cur_vlvl = vlvl[i];
				found_match = true;
				break;
			}
		}

		/* If we did not find a matching VLVL level then abort */
		if (!found_match)
			return -EINVAL;

		/*
		 * Look for a secondary rail index whose VLVL value
		 * is greater than or equal to the VLVL value of the
		 * corresponding index of the primary rail
		 */
		for (j = 0; j < sec_rail->num; j++) {
			if (sec_rail->val[j] >= cur_vlvl ||
					j + 1 == sec_rail->num) {
				votes[i].sec_idx = j;
				break;
			}
		}
	}
	return 0;
}

/*
 * rpmh_arc_votes_init() - initialize the RPMh votes needed for rail voltage
 * scaling by the GMU.
 * @gmu: Pointer to GMU device
 * @pri_rail: Pointer to primary power rail VLVL table
 * @sec_rail: Pointer to second/dependent power rail VLVL table
 *	of pri_rail VLVL table
 * @type: the type of the primary rail, GPU or GMU
 */
static int rpmh_arc_votes_init(struct gmu_device *gmu,
		struct rpmh_arc_vals *pri_rail,
		struct rpmh_arc_vals *sec_rail,
		unsigned int type)
{
	struct device *dev;
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	unsigned int num_freqs;
	struct arc_vote_desc *votes;
	unsigned int vlvl_tbl[MAX_GX_LEVELS];
	unsigned int *freq_tbl;
	int i, ret;
	struct dev_pm_opp *opp;

	if (type == GPU_ARC_VOTE) {
		num_freqs = gmu->num_gpupwrlevels;
		votes = gmu->rpmh_votes.gx_votes;
		freq_tbl = gmu->gpu_freqs;
		dev = &device->pdev->dev;
	} else if (type == GMU_ARC_VOTE) {
		num_freqs = gmu->num_gmupwrlevels;
		votes = gmu->rpmh_votes.cx_votes;
		freq_tbl = gmu->gmu_freqs;
		dev = &gmu->pdev->dev;
	} else {
		return -EINVAL;
	}

	if (num_freqs > pri_rail->num) {
		dev_err(&gmu->pdev->dev,
			"%s defined more DCVS levels than RPMh can support\n",
			debug_strs[type]);
		return -EINVAL;
	}

	memset(vlvl_tbl, 0, sizeof(vlvl_tbl));
	for (i = 0; i < num_freqs; i++) {
		/* Hardcode VLVL for 0 because it is not registered in OPP */
		if (freq_tbl[i] == 0) {
			vlvl_tbl[i] = 0;
			continue;
		}

		/* Otherwise get the value from the OPP API */
		opp = dev_pm_opp_find_freq_exact(dev, freq_tbl[i], true);
		if (IS_ERR(opp)) {
			dev_err(&gmu->pdev->dev,
				"Failed to find opp freq %d of %s\n",
				freq_tbl[i], debug_strs[type]);
			return PTR_ERR(opp);
		}

		/* Values from OPP framework are offset by 1 */
		vlvl_tbl[i] = dev_pm_opp_get_voltage(opp) - 1;
	}

	ret = setup_volt_dependency_tbl(votes,
			pri_rail, sec_rail, vlvl_tbl, num_freqs);

	if (ret)
		dev_err(&gmu->pdev->dev, "%s rail volt failed to match DT freqs\n",
			debug_strs[type]);

	return ret;
}

/*
 * build_rpmh_bw_votes() - build TCS commands to vote for bandwidth.
 * Each command sets the frequency of a node along the path to DDR or CNOC.
 * @rpmh_vote: Pointer to RPMh vote needed by GMU to set BW via RPMh
 * @num_usecases: Number of BW use cases (or BW levels)
 * @handle: Provided by bus driver. It contains TCS command sets for
 * all BW use cases of a bus client.
 */
static void build_rpmh_bw_votes(struct gmu_bw_votes *rpmh_vote,
		unsigned int num_usecases, struct msm_bus_tcs_handle handle)
{
	struct msm_bus_tcs_usecase *tmp;
	int i, j;

	for (i = 0; i < num_usecases; i++) {
		tmp = &handle.usecases[i];
		for (j = 0; j < tmp->num_cmds; j++) {
			if (!i) {
				/*
				 * Wait bitmask and TCS command addresses are
				 * the same for all bw use cases. To save data
				 * volume exchanged between driver and GMU,
				 * only transfer bitmasks and TCS command
				 * addresses of the first set of bw use cases.
				 */
				rpmh_vote->cmds_per_bw_vote = tmp->num_cmds;
				rpmh_vote->cmds_wait_bitmask =
						tmp->cmds[j].complete ?
						rpmh_vote->cmds_wait_bitmask
						| BIT(i)
						: rpmh_vote->cmds_wait_bitmask
						& (~BIT(i));
				rpmh_vote->cmd_addrs[j] = tmp->cmds[j].addr;
			}
			rpmh_vote->cmd_data[i][j] = tmp->cmds[j].data;
		}
	}
}

/*
 * gmu_bus_vote_init - initialize the RPMh votes needed for bandwidth scaling
 * by the GMU.
 * @gmu: Pointer to GMU device
 * @pwr: Pointer to KGSL power controller
 */
static int gmu_bus_vote_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
{
	struct msm_bus_tcs_usecase *usecases;
	struct msm_bus_tcs_handle hdl;
	struct rpmh_votes_t *votes = &gmu->rpmh_votes;
	int ret;

	usecases = kcalloc(gmu->num_bwlevels, sizeof(*usecases), GFP_KERNEL);
	if (!usecases)
		return -ENOMEM;

	hdl.num_usecases = gmu->num_bwlevels;
	hdl.usecases = usecases;

	/*
	 * Query TCS command set for each use case defined in GPU b/w table
	 */
	ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->pcl);
	if (ret)
		goto out;

	build_rpmh_bw_votes(&votes->ddr_votes, gmu->num_bwlevels, hdl);

	/*
	 * Query CNOC TCS command set for each use case defined in cnoc bw table
	 */
	ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->ccl);
	if (ret)
		goto out;

	build_rpmh_bw_votes(&votes->cnoc_votes, gmu->num_cnocbwlevels, hdl);

out:
	/* Free the temporary usecase buffer on both success and error paths */
	kfree(usecases);

	return ret;
}

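/*
 * gmu_rpmh_init() - build the bandwidth and ARC voltage vote tables that the
 * GMU hands to RPMh, by querying the bus driver and the RPMh command database.
 * @gmu: Pointer to GMU device
 * @pwr: Pointer to KGSL power controller
 */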
int gmu_rpmh_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
{
	struct rpmh_arc_vals gfx_arc, cx_arc, mx_arc;
	int ret;

	/* Populate BW vote table */
	ret = gmu_bus_vote_init(gmu, pwr);
	if (ret)
		return ret;

	/* Populate GPU and GMU frequency vote table */
	ret = rpmh_arc_cmds(gmu, &gfx_arc, gfx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_cmds(gmu, &cx_arc, cx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_cmds(gmu, &mx_arc, mx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_votes_init(gmu, &gfx_arc, &mx_arc, GPU_ARC_VOTE);
	if (ret)
		return ret;

	return rpmh_arc_votes_init(gmu, &cx_arc, &mx_arc, GMU_ARC_VOTE);
}

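/*
 * gmu_irq_handler() - Interrupt handler for the always-on GMU interrupt line.
 * Clears the pending status and reports watchdog, AHB bus and fence errors;
 * a watchdog bite also triggers GMU fault recovery via the dispatcher.
 */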
static irqreturn_t gmu_irq_handler(int irq, void *data)
{
	struct gmu_device *gmu = data;
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int status = 0;

	adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS, &status);
	adreno_write_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, status);

	/* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD WAKEUP interrupts */
	if (status & GMU_INT_WDOG_BITE) {
		dev_err_ratelimited(&gmu->pdev->dev,
				"GMU watchdog expired interrupt received\n");
		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
		adreno_dispatcher_schedule(device);
	}
	if (status & GMU_INT_HOST_AHB_BUS_ERR)
		dev_err_ratelimited(&gmu->pdev->dev,
				"AHB bus error interrupt received\n");
	if (status & GMU_INT_FENCE_ERR) {
		unsigned int fence_status;

		adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_AHB_FENCE_STATUS, &fence_status);
		dev_err_ratelimited(&gmu->pdev->dev,
			"FENCE error interrupt received %x\n", fence_status);
	}

	if (status & ~GMU_AO_INT_MASK)
		dev_err_ratelimited(&gmu->pdev->dev,
				"Unhandled GMU interrupts 0x%lx\n",
				status & ~GMU_AO_INT_MASK);

	return IRQ_HANDLED;
}

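/*
 * hfi_irq_handler() - Interrupt handler for the GMU-to-host HFI interrupt.
 * Schedules the HFI tasklet for message queue traffic and treats a CM3
 * fault as a GMU fault that needs dispatcher recovery.
 */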
static irqreturn_t hfi_irq_handler(int irq, void *data)
{
	struct kgsl_hfi *hfi = data;
	struct gmu_device *gmu = container_of(hfi, struct gmu_device, hfi);
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int status = 0;

	adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status);
	adreno_write_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status);

	if (status & HFI_IRQ_MSGQ_MASK)
		tasklet_hi_schedule(&hfi->tasklet);
	if (status & HFI_IRQ_CM3_FAULT_MASK) {
		dev_err_ratelimited(&gmu->pdev->dev,
				"GMU CM3 fault interrupt received\n");
		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
		adreno_dispatcher_schedule(device);
	}
	if (status & ~HFI_IRQ_MASK)
		dev_err_ratelimited(&gmu->pdev->dev,
				"Unhandled HFI interrupts 0x%lx\n",
				status & ~HFI_IRQ_MASK);

	return IRQ_HANDLED;
}

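/*
 * gmu_pwrlevel_probe() - Read the "qcom,gmu-pwrlevels" node from the device
 * tree and fill in the GMU frequency table and power level count.
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */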
static int gmu_pwrlevel_probe(struct gmu_device *gmu, struct device_node *node)
{
	struct device_node *pwrlevel_node, *child;

	pwrlevel_node = of_find_node_by_name(node, "qcom,gmu-pwrlevels");

	if (pwrlevel_node == NULL) {
		dev_err(&gmu->pdev->dev, "Unable to find 'qcom,gmu-pwrlevels'\n");
		return -EINVAL;
	}

	gmu->num_gmupwrlevels = 0;

	for_each_child_of_node(pwrlevel_node, child) {
		unsigned int index;

		if (of_property_read_u32(child, "reg", &index))
			return -EINVAL;

		if (index >= MAX_CX_LEVELS) {
			dev_err(&gmu->pdev->dev, "gmu pwrlevel %d is out of range\n",
				index);
			continue;
		}

		if (index >= gmu->num_gmupwrlevels)
			gmu->num_gmupwrlevels = index + 1;

		if (of_property_read_u32(child, "qcom,gmu-freq",
				&gmu->gmu_freqs[index]))
			return -EINVAL;
	}

	return 0;
}

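/*
 * gmu_reg_probe() - Look up a named register resource on the GMU platform
 * device and ioremap it as either the GMU CSR block or the PDC block.
 * @gmu: Pointer to GMU device
 * @name: Name of the register resource in the device tree
 * @is_gmu: True for the GMU CSR region, false for the PDC region
 */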
static int gmu_reg_probe(struct gmu_device *gmu, const char *name, bool is_gmu)
{
	struct resource *res;

	res = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM, name);
	if (res == NULL) {
		dev_err(&gmu->pdev->dev,
			"platform_get_resource %s failed\n", name);
		return -EINVAL;
	}

	if (res->start == 0 || resource_size(res) == 0) {
		dev_err(&gmu->pdev->dev,
			"dev %d %s invalid register region\n",
			gmu->pdev->dev.id, name);
		return -EINVAL;
	}

	if (is_gmu) {
		gmu->reg_phys = res->start;
		gmu->reg_len = resource_size(res);
		gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
				resource_size(res));

		if (gmu->reg_virt == NULL) {
			dev_err(&gmu->pdev->dev, "GMU regs ioremap failed\n");
			return -ENODEV;
		}

	} else {
		gmu->pdc_reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
				resource_size(res));
		if (gmu->pdc_reg_virt == NULL) {
			dev_err(&gmu->pdev->dev, "PDC regs ioremap failed\n");
			return -ENODEV;
		}
	}

	return 0;
}

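/*
 * gmu_clocks_probe() - Get all GMU clocks named in the device tree
 * "clock-names" property and store them in the GMU clock array.
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */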
static int gmu_clocks_probe(struct gmu_device *gmu, struct device_node *node)
{
	const char *cname;
	struct property *prop;
	struct clk *c;
	int i = 0;

	of_property_for_each_string(node, "clock-names", prop, cname) {
		c = devm_clk_get(&gmu->pdev->dev, cname);

		if (IS_ERR(c)) {
			dev_err(&gmu->pdev->dev,
				"dt: Couldn't get GMU clock: %s\n", cname);
			return PTR_ERR(c);
		}

		if (i >= MAX_GMU_CLKS) {
			dev_err(&gmu->pdev->dev,
				"dt: too many GMU clocks defined\n");
			return -EINVAL;
		}

		gmu->clks[i++] = c;
	}

	return 0;
}

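/*
 * gmu_gpu_bw_probe() - Read the GPU bus scale table from the device tree and
 * register the GPU (DDR path) bus client used for GMU bandwidth votes.
 * @gmu: Pointer to GMU device
 */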
static int gmu_gpu_bw_probe(struct gmu_device *gmu)
{
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct msm_bus_scale_pdata *bus_scale_table;

	bus_scale_table = msm_bus_cl_get_pdata(device->pdev);
	if (bus_scale_table == NULL) {
		dev_err(&gmu->pdev->dev, "dt: cannot get bus table\n");
		return -ENODEV;
	}

	gmu->num_bwlevels = bus_scale_table->num_usecases;
	gmu->pcl = msm_bus_scale_register_client(bus_scale_table);
	if (!gmu->pcl) {
		dev_err(&gmu->pdev->dev, "dt: cannot register bus client\n");
		return -ENODEV;
	}

	return 0;
}

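/*
 * gmu_cnoc_bw_probe() - Read the CNOC bus scale table from the GMU device
 * tree node and register the CNOC bus client.
 * @gmu: Pointer to GMU device
 */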
static int gmu_cnoc_bw_probe(struct gmu_device *gmu)
{
	struct msm_bus_scale_pdata *cnoc_table;

	cnoc_table = msm_bus_cl_get_pdata(gmu->pdev);
	if (cnoc_table == NULL) {
		dev_err(&gmu->pdev->dev, "dt: cannot get cnoc table\n");
		return -ENODEV;
	}

	gmu->num_cnocbwlevels = cnoc_table->num_usecases;
	gmu->ccl = msm_bus_scale_register_client(cnoc_table);
	if (!gmu->ccl) {
		dev_err(&gmu->pdev->dev, "dt: cannot register cnoc client\n");
		return -ENODEV;
	}

	return 0;
}

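/*
 * gmu_regulators_probe() - Get the CX and GX gdsc regulators listed in the
 * "regulator-names" property of the GMU device tree node.
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */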
static int gmu_regulators_probe(struct gmu_device *gmu,
		struct device_node *node)
{
	const char *name;
	struct property *prop;
	struct device *dev = &gmu->pdev->dev;
	int ret = 0;

	of_property_for_each_string(node, "regulator-names", prop, name) {
		if (!strcmp(name, "vddcx")) {
			gmu->cx_gdsc = devm_regulator_get(dev, name);
			if (IS_ERR(gmu->cx_gdsc)) {
				ret = PTR_ERR(gmu->cx_gdsc);
				dev_err(dev, "dt: GMU couldn't get CX gdsc\n");
				gmu->cx_gdsc = NULL;
				return ret;
			}
		} else if (!strcmp(name, "vdd")) {
			gmu->gx_gdsc = devm_regulator_get(dev, name);
			if (IS_ERR(gmu->gx_gdsc)) {
				ret = PTR_ERR(gmu->gx_gdsc);
				dev_err(dev, "dt: GMU couldn't get GX gdsc\n");
				gmu->gx_gdsc = NULL;
				return ret;
			}
		} else {
			dev_err(dev, "dt: Unknown GMU regulator: %s\n", name);
			return -ENODEV;
		}
	}

	return 0;
}

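/*
 * gmu_irq_probe() - Look up and request the HFI and GMU interrupt lines
 * on the GMU platform device and install their handlers.
 * @gmu: Pointer to GMU device
 */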
static int gmu_irq_probe(struct gmu_device *gmu)
{
	int ret;
	struct kgsl_hfi *hfi = &gmu->hfi;

	hfi->hfi_interrupt_num = platform_get_irq_byname(gmu->pdev,
			"kgsl_hfi_irq");
	ret = devm_request_irq(&gmu->pdev->dev,
			hfi->hfi_interrupt_num,
			hfi_irq_handler, IRQF_TRIGGER_HIGH,
			"HFI", hfi);
	if (ret) {
		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
				hfi->hfi_interrupt_num, ret);
		return ret;
	}

	gmu->gmu_interrupt_num = platform_get_irq_byname(gmu->pdev,
			"kgsl_gmu_irq");
	ret = devm_request_irq(&gmu->pdev->dev,
			gmu->gmu_interrupt_num,
			gmu_irq_handler, IRQF_TRIGGER_HIGH,
			"GMU", gmu);
	if (ret)
		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
				gmu->gmu_interrupt_num, ret);

	return ret;
}

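/*
 * gmu_irq_enable()/gmu_irq_disable() - Clear, unmask or mask the HFI and
 * always-on GMU interrupts on the GMU side and enable or disable the
 * corresponding host interrupt lines.
 */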
static void gmu_irq_enable(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;

	/* Clear any pending IRQs before unmasking on GMU */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
			0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
			0xFFFFFFFF);

	/* Unmask needed IRQs on GMU */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
			(unsigned int) ~HFI_IRQ_MASK);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
			(unsigned int) ~GMU_AO_INT_MASK);

	/* Enable all IRQs on host */
	enable_irq(hfi->hfi_interrupt_num);
	enable_irq(gmu->gmu_interrupt_num);
}

static void gmu_irq_disable(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;

	/* Disable all IRQs on host */
	disable_irq(gmu->gmu_interrupt_num);
	disable_irq(hfi->hfi_interrupt_num);

	/* Mask all IRQs on GMU */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
			0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
			0xFFFFFFFF);

	/* Clear any pending IRQs before disabling */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
			0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
			0xFFFFFFFF);
}

/* Do not access any GMU registers in GMU probe function */
int gmu_probe(struct kgsl_device *device)
{
	struct device_node *node;
	struct gmu_device *gmu = &device->gmu;
	struct gmu_memdesc *mem_addr = NULL;
	struct kgsl_hfi *hfi = &gmu->hfi;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i = 0, ret = -ENXIO;

	node = of_find_compatible_node(device->pdev->dev.of_node,
			NULL, "qcom,gpu-gmu");

	if (node == NULL)
		return ret;

	device->gmu.pdev = of_find_device_by_node(node);

	/* Set up GMU regulators */
	ret = gmu_regulators_probe(gmu, node);
	if (ret)
		goto error;

	/* Set up GMU clocks */
	ret = gmu_clocks_probe(gmu, node);
	if (ret)
		goto error;

	/* Set up GMU IOMMU and shared memory with GMU */
	ret = gmu_memory_probe(&device->gmu, node);
	if (ret)
		goto error;
	mem_addr = gmu->hfi_mem;

	/* Map and reserve GMU CSRs registers */
	ret = gmu_reg_probe(gmu, "kgsl_gmu_reg", true);
	if (ret)
		goto error;

	ret = gmu_reg_probe(gmu, "kgsl_gmu_pdc_reg", false);
	if (ret)
		goto error;

	gmu->gmu2gpu_offset = (gmu->reg_phys - device->reg_phys) >> 2;

	/* Initialize HFI and GMU interrupts */
	ret = gmu_irq_probe(gmu);
	if (ret)
		goto error;

	/* Don't enable GMU interrupts until GMU started */
	/* We cannot use gmu_irq_disable because it writes registers */
	disable_irq(gmu->gmu_interrupt_num);
	disable_irq(hfi->hfi_interrupt_num);

	tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long)gmu);
	INIT_LIST_HEAD(&hfi->msglist);
	spin_lock_init(&hfi->msglock);

	/* Retrieve GMU/GPU power level configurations */
	ret = gmu_pwrlevel_probe(gmu, node);
	if (ret)
		goto error;

	gmu->num_gpupwrlevels = pwr->num_pwrlevels;

	for (i = 0; i < gmu->num_gpupwrlevels; i++) {
		int j = gmu->num_gpupwrlevels - 1 - i;

		gmu->gpu_freqs[i] = pwr->pwrlevels[j].gpu_freq;
	}

	/* Initialize GPU b/w levels configuration */
	ret = gmu_gpu_bw_probe(gmu);
	if (ret)
		goto error;

	/* Initialize GMU CNOC b/w levels configuration */
	ret = gmu_cnoc_bw_probe(gmu);
	if (ret)
		goto error;

	/* Populate RPMh configurations */
	ret = gmu_rpmh_init(gmu, pwr);
	if (ret)
		goto error;

	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);

	/* Set up GMU idle states */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT))
		gmu->idle_level = GPU_HW_MIN_VOLT;
	else if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP))
		gmu->idle_level = GPU_HW_NAP;
	else if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
		gmu->idle_level = GPU_HW_IFPC;
	else if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
		gmu->idle_level = GPU_HW_SPTP_PC;
	else
		gmu->idle_level = GPU_HW_ACTIVE;

	/* Disable LM during boot time */
	clear_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag);
	return 0;

error:
	gmu_remove(device);
	return ret;
}

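/*
 * gmu_enable_clks() - Set the default GMU clock frequency, enable all GMU
 * clocks and mark GMU_CLK_ON in the GMU flags.
 * @gmu: Pointer to GMU device
 */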
static int gmu_enable_clks(struct gmu_device *gmu)
{
	int ret, j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
		return -EINVAL;

	ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
	if (ret) {
		dev_err(&gmu->pdev->dev, "fail to set default GMU clk freq %d\n",
				gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
		return ret;
	}

	while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
		ret = clk_prepare_enable(gmu->clks[j]);
		if (ret) {
			dev_err(&gmu->pdev->dev,
					"fail to enable gpucc clk idx %d\n",
					j);
			return ret;
		}
		j++;
	}

	set_bit(GMU_CLK_ON, &gmu->flags);
	return 0;
}

static int gmu_disable_clks(struct gmu_device *gmu)
{
	int j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
		return 0;

	while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
		clk_disable_unprepare(gmu->clks[j]);
		j++;
	}

	clear_bit(GMU_CLK_ON, &gmu->flags);
	return 0;
}

static int gmu_enable_gdsc(struct gmu_device *gmu)
{
	int ret;

	if (IS_ERR_OR_NULL(gmu->cx_gdsc))
		return 0;

	ret = regulator_enable(gmu->cx_gdsc);
	if (ret)
		dev_err(&gmu->pdev->dev,
			"Failed to enable GMU CX gdsc, error %d\n", ret);

	return ret;
}

#define CX_GDSC_TIMEOUT	5000	/* ms */
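/*
 * gmu_disable_gdsc() - Vote off the GMU CX gdsc and poll, up to
 * CX_GDSC_TIMEOUT, until the regulator is actually off.
 * @gmu: Pointer to GMU device
 */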
static int gmu_disable_gdsc(struct gmu_device *gmu)
{
	int ret;
	unsigned long t;

	if (IS_ERR_OR_NULL(gmu->cx_gdsc))
		return 0;

	ret = regulator_disable(gmu->cx_gdsc);
	if (ret) {
		dev_err(&gmu->pdev->dev,
			"Failed to disable GMU CX gdsc, error %d\n", ret);
		return ret;
	}

	/*
	 * After the GX GDSC is off, the CX GDSC must be off as well.
	 * Voting it off from the GPU driver alone cannot guarantee that
	 * the CX GDSC is off, so poll with a 5s timeout to make sure.
	 */
	t = jiffies + msecs_to_jiffies(CX_GDSC_TIMEOUT);
	do {
		if (!regulator_is_enabled(gmu->cx_gdsc))
			return 0;
		usleep_range(10, 100);

	} while (!(time_after(jiffies, t)));

	if (!regulator_is_enabled(gmu->cx_gdsc))
		return 0;

	dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout");
	return -ETIMEDOUT;
}

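/*
 * gmu_suspend() - Force the GMU into a suspended state: abandon pending HFI
 * messages, disable GMU interrupts, ask the GMU firmware to suspend and turn
 * off the GMU clocks and gdsc.
 * @device: Pointer to the KGSL device that owns the GMU
 */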
static int gmu_suspend(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;

	if (!test_bit(GMU_CLK_ON, &gmu->flags))
		return 0;

	/* Pending messages in all queues are abandoned */
	hfi_stop(gmu);
	clear_bit(GMU_HFI_ON, &gmu->flags);
	gmu_irq_disable(device);

	if (gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_SUSPEND, 0, 0))
		return -EINVAL;

	gmu_disable_clks(gmu);
	gmu_disable_gdsc(gmu);
	dev_err(&gmu->pdev->dev, "Suspended GMU\n");
	return 0;
}

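/*
 * gmu_snapshot() - Trigger a GMU NMI so the firmware dumps its state, then
 * capture a KGSL device snapshot and bump the GMU fault counter.
 * @device: Pointer to the KGSL device that owns the GMU
 */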
void gmu_snapshot(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;

	/* Mask so there's no interrupt caused by NMI */
	adreno_write_gmureg(adreno_dev,
			ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);

	/* Make sure the interrupt is masked before causing it */
	wmb();
	adreno_write_gmureg(adreno_dev,
			ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
	adreno_write_gmureg(adreno_dev,
			ADRENO_REG_GMU_CM3_CFG, (1 << 9));

	/* Wait for the NMI to be handled */
	wmb();
	udelay(100);
	kgsl_device_snapshot(device, NULL, true);

	adreno_write_gmureg(adreno_dev,
			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, 0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev,
			ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
			(unsigned int) ~HFI_IRQ_MASK);

	gmu->fault_count++;
}

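/*
 * gmu_change_gpu_pwrlevel() - Apply thermal/max/min constraints to the
 * requested power level, update the active level bookkeeping and request
 * the adjusted DCVS level.
 * @device: Pointer to the KGSL device
 * @new_level: Requested GPU power level
 */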
static void gmu_change_gpu_pwrlevel(struct kgsl_device *device,
		unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	unsigned int old_level = pwr->active_pwrlevel;

	/*
	 * Update the level according to any thermal,
	 * max/min, or power constraints.
	 */
	new_level = kgsl_pwrctrl_adjust_pwrlevel(device, new_level);

	/*
	 * If thermal cycling is required and the new level hits the
	 * thermal limit, kick off the cycling.
	 */
	kgsl_pwrctrl_set_thermal_cycle(device, new_level);

	pwr->active_pwrlevel = new_level;
	pwr->previous_pwrlevel = old_level;

	/* Request adjusted DCVS level */
	kgsl_clk_set_rate(device, pwr->active_pwrlevel);
}

/* To be called to power on both GPU and GMU */
int gmu_start(struct kgsl_device *device)
{
	int ret = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;

	switch (device->state) {
	case KGSL_STATE_INIT:
	case KGSL_STATE_SUSPEND:
		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
		gmu_enable_gdsc(gmu);
		gmu_enable_clks(gmu);
		gmu_irq_enable(device);

		/* Vote for 300MHz DDR for GMU to init */
		ret = msm_bus_scale_client_update_request(gmu->pcl,
				pwr->pwrlevels[pwr->default_pwrlevel].bus_freq);
		if (ret)
			dev_err(&gmu->pdev->dev,
				"Failed to allocate gmu b/w: %d\n", ret);

		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
				GMU_COLD_BOOT, 0);
		if (ret)
			goto error_gmu;

		ret = hfi_start(gmu, GMU_COLD_BOOT);
		if (ret)
			goto error_gmu;

		/* Request default DCVS level */
		gmu_change_gpu_pwrlevel(device, pwr->default_pwrlevel);
		msm_bus_scale_client_update_request(gmu->pcl, 0);
		break;

	case KGSL_STATE_SLUMBER:
		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
		gmu_enable_gdsc(gmu);
		gmu_enable_clks(gmu);
		gmu_irq_enable(device);

		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
				GMU_WARM_BOOT, 0);
		if (ret)
			goto error_gmu;

		ret = hfi_start(gmu, GMU_WARM_BOOT);
		if (ret)
			goto error_gmu;

		gmu_change_gpu_pwrlevel(device, pwr->default_pwrlevel);
		break;

	case KGSL_STATE_RESET:
		if (test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv) ||
			test_bit(GMU_FAULT, &gmu->flags)) {
			gmu_suspend(device);
			gmu_enable_gdsc(gmu);
			gmu_enable_clks(gmu);
			gmu_irq_enable(device);

			ret = gpudev->rpmh_gpu_pwrctrl(
				adreno_dev, GMU_FW_START, GMU_RESET, 0);
			if (ret)
				goto error_gmu;

			ret = hfi_start(gmu, GMU_COLD_BOOT);
			if (ret)
				goto error_gmu;

			/* Send DCVS level prior to reset */
			gmu_change_gpu_pwrlevel(device,
				pwr->default_pwrlevel);
		} else {
			/* GMU fast boot */
			hfi_stop(gmu);

			ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
					GMU_RESET, 0);
			if (ret)
				goto error_gmu;

			ret = hfi_start(gmu, GMU_WARM_BOOT);
			if (ret)
				goto error_gmu;
		}
		break;
	default:
		break;
	}

	return ret;

error_gmu:
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
		gpudev->oob_clear(adreno_dev,
				OOB_BOOT_SLUMBER_CLEAR_MASK);
	gmu_snapshot(device);
	return ret;
}

/* Caller shall ensure GPU is ready for SLUMBER */
void gmu_stop(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int ret = 0;

	if (!test_bit(GMU_CLK_ON, &gmu->flags))
		return;

	/* Wait for the lowest idle level we requested */
	if (gpudev->wait_for_lowest_idle &&
			gpudev->wait_for_lowest_idle(adreno_dev))
		goto error;

	ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
	if (ret)
		goto error;

	if (gpudev->wait_for_gmu_idle &&
			gpudev->wait_for_gmu_idle(adreno_dev))
		goto error;

	/* Pending messages in all queues are abandoned */
	hfi_stop(gmu);
	clear_bit(GMU_HFI_ON, &gmu->flags);
	gmu_irq_disable(device);

	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
	gmu_disable_clks(gmu);
	gmu_disable_gdsc(gmu);

	msm_bus_scale_client_update_request(gmu->pcl, 0);
	return;

error:
	/*
	 * The power controller will change state to SLUMBER anyway.
	 * Set the GMU_FAULT flag to indicate to the power controller
	 * that hang recovery is needed to power on the GPU.
	 */
	set_bit(GMU_FAULT, &gmu->flags);
	dev_err(&gmu->pdev->dev, "Failed to stop GMU\n");
	gmu_snapshot(device);
}

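/*
 * gmu_remove() - Tear down the GMU: stop it, release interrupts, bus
 * clients, clocks, mapped registers, shared memory and regulators.
 * @device: Pointer to the KGSL device that owns the GMU
 */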
void gmu_remove(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;
	int i = 0;

	if (!device->gmu.pdev)
		return;

	tasklet_kill(&hfi->tasklet);

	gmu_stop(device);
	gmu_irq_disable(device);

	while ((i < MAX_GMU_CLKS) && gmu->clks[i]) {
		gmu->clks[i] = NULL;
		i++;
	}

	if (gmu->gmu_interrupt_num) {
		devm_free_irq(&gmu->pdev->dev,
				gmu->gmu_interrupt_num, gmu);
		gmu->gmu_interrupt_num = 0;
	}

	if (hfi->hfi_interrupt_num) {
		devm_free_irq(&gmu->pdev->dev,
				hfi->hfi_interrupt_num, hfi);
		hfi->hfi_interrupt_num = 0;
	}

	if (gmu->ccl) {
		msm_bus_scale_unregister_client(gmu->ccl);
		gmu->ccl = 0;
	}

	if (gmu->pcl) {
		msm_bus_scale_unregister_client(gmu->pcl);
		gmu->pcl = 0;
	}

	if (gmu->pdc_reg_virt) {
		devm_iounmap(&gmu->pdev->dev, gmu->pdc_reg_virt);
		gmu->pdc_reg_virt = NULL;
	}

	if (gmu->reg_virt) {
		devm_iounmap(&gmu->pdev->dev, gmu->reg_virt);
		gmu->reg_virt = NULL;
	}

	if (gmu->hfi_mem || gmu->dump_mem)
		gmu_memory_close(&device->gmu);

	for (i = 0; i < MAX_GMU_CLKS; i++) {
		if (gmu->clks[i]) {
			devm_clk_put(&gmu->pdev->dev, gmu->clks[i]);
			gmu->clks[i] = NULL;
		}
	}

	if (gmu->gx_gdsc) {
		devm_regulator_put(gmu->gx_gdsc);
		gmu->gx_gdsc = NULL;
	}

	if (gmu->cx_gdsc) {
		devm_regulator_put(gmu->cx_gdsc);
		gmu->cx_gdsc = NULL;
	}

	device->gmu.pdev = NULL;
}

/*
 * adreno_gmu_fenced_write() - Write to a fenced GMU register and make sure
 * the write actually lands
 * @adreno_dev: Pointer to the Adreno device that owns the GMU
 * @offset: 32bit register enum that is to be written
 * @val: The value to be written to the register
 * @fence_mask: The value to poll the fence status register
 *
 * Check the WRITEDROPPED0/1 bit in the FENCE_STATUS register to check if
 * the write to the fenced register went through. If it didn't then we retry
 * the write until it goes through or we time out.
 */
int adreno_gmu_fenced_write(struct adreno_device *adreno_dev,
		enum adreno_regs offset, unsigned int val,
		unsigned int fence_mask)
{
	unsigned int status, i;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned int reg_offset = gpudev->reg_offsets->offsets[offset];

	adreno_writereg(adreno_dev, offset, val);

	if (!kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
		return 0;

	for (i = 0; i < GMU_WAKEUP_RETRY_MAX; i++) {
		adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_AHB_FENCE_STATUS,
			&status);

		/*
		 * If !writedropped0/1, then the write to the fenced register
		 * was successful
		 */
		if (!(status & fence_mask))
			return 0;
		/* Wait a small amount of time before trying again */
		udelay(GMU_WAKEUP_DELAY_US);

		/* Try to write the fenced register again */
		adreno_writereg(adreno_dev, offset, val);
	}

	dev_err(adreno_dev->dev.dev,
		"GMU fenced register write timed out: reg 0x%x\n", reg_offset);
	return -ETIMEDOUT;
}