/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/pm_opp.h>
#include <linux/io.h>
#include <soc/qcom/cmd-db.h>

#include "kgsl_device.h"
#include "kgsl_gmu.h"
#include "kgsl_hfi.h"
#include "a6xx_reg.h"
#include "adreno.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl_gmu."

static bool nogmu;
module_param(nogmu, bool, 0444);
MODULE_PARM_DESC(nogmu, "Disable the GMU");

#define GMU_CONTEXT_USER	0
#define GMU_CONTEXT_KERNEL	1
#define GMU_KERNEL_ENTRIES	8

enum gmu_iommu_mem_type {
	GMU_CACHED_CODE,
	GMU_CACHED_DATA,
	GMU_NONCACHED_KERNEL,
	GMU_NONCACHED_USER
};

/*
 * GMU virtual memory mapping definitions
 */
struct gmu_vma {
	unsigned int noncached_ustart;
	unsigned int noncached_usize;
	unsigned int noncached_kstart;
	unsigned int noncached_ksize;
	unsigned int cached_dstart;
	unsigned int cached_dsize;
	unsigned int cached_cstart;
	unsigned int cached_csize;
	unsigned int image_start;
};
62
George Shena2f7b432017-08-18 12:58:18 -070063static void gmu_snapshot(struct kgsl_device *device);
64
Kyle Pieferb1027b02017-02-10 13:58:58 -080065struct gmu_iommu_context {
66 const char *name;
67 struct device *dev;
68 struct iommu_domain *domain;
69};
70
71#define HFIMEM_SIZE SZ_16K
72
73#define DUMPMEM_SIZE SZ_16K
74
75/* Define target specific GMU VMA configurations */
76static const struct gmu_vma vma = {
77 /* Noncached user segment */
78 0x80000000, SZ_1G,
79 /* Noncached kernel segment */
80 0x60000000, SZ_512M,
81 /* Cached data segment */
82 0x44000, (SZ_256K-SZ_16K),
83 /* Cached code segment */
84 0x0, (SZ_256K-SZ_16K),
85 /* FW image */
86 0x0,
87};
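
/*
 * Resulting GMU virtual address layout, derived from the table above: the
 * cached code segment starts at 0x0 (the FW image is loaded there, since
 * image_start is also 0x0), the cached data segment at 0x44000, the
 * noncached kernel segment at 0x60000000 and the noncached user segment
 * at 0x80000000.
 */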

struct gmu_iommu_context gmu_ctx[] = {
	[GMU_CONTEXT_USER] = { .name = "gmu_user" },
	[GMU_CONTEXT_KERNEL] = { .name = "gmu_kernel" }
};

/*
 * A few static memory buffers are allocated and mapped at boot time so that
 * the GMU can function. The buffers are permanent (not freed) after GPU
 * boot, and their sizes are constant and not expected to change.
 *
 * We define an array and a simple allocator to keep track of the currently
 * active SMMU entries of the GMU kernel mode context. Each entry is assigned
 * a unique address inside the GMU kernel mode address range. The addresses
 * are assigned sequentially and aligned to 1MB each.
 */
static struct gmu_memdesc gmu_kmem_entries[GMU_KERNEL_ENTRIES];
static unsigned long gmu_kmem_bitmap;
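
/*
 * Illustration of the slot allocator above: slot i (0..GMU_KERNEL_ENTRIES-1)
 * maps to GMU virtual address vma.noncached_kstart + (i * SZ_1M), and bit i
 * of gmu_kmem_bitmap records whether the slot is in use. For example, the
 * first allocation lands at 0x60000000 and the next at 0x60100000, each
 * capped at 1MB (see allocate_gmu_kmem() below).
 */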

/*
 * kgsl_gmu_isenabled() - Check if there is a GMU and it is enabled
 * @device: Pointer to the KGSL device that owns the GMU
 *
 * Check if a GMU has been found and successfully probed. Also
 * check that the feature flag to use a GMU is enabled. Returns
 * true if both of these conditions are met, otherwise false.
 */
bool kgsl_gmu_isenabled(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!nogmu && gmu->pdev &&
			ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
		return true;
	return false;
}

static int _gmu_iommu_fault_handler(struct device *dev,
		unsigned long addr, int flags, const char *name)
{
	char *fault_type = "unknown";

	if (flags & IOMMU_FAULT_TRANSLATION)
		fault_type = "translation";
	else if (flags & IOMMU_FAULT_PERMISSION)
		fault_type = "permission";

	dev_err(dev, "GMU fault addr = %lX, context=%s (%s %s fault)\n",
			addr, name,
			(flags & IOMMU_FAULT_WRITE) ? "write" : "read",
			fault_type);

	return 0;
}

static int gmu_kernel_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token)
{
	return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_kernel");
}

static int gmu_user_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token)
{
	return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_user");
}

static void free_gmu_mem(struct gmu_device *gmu,
		struct gmu_memdesc *md)
{
	/* Free GMU image memory */
	if (md->hostptr)
		dma_free_attrs(&gmu->pdev->dev, (size_t) md->size,
				(void *)md->hostptr, md->physaddr, 0);
	memset(md, 0, sizeof(*md));
}

static int alloc_and_map(struct gmu_device *gmu, unsigned int ctx_id,
		struct gmu_memdesc *md, unsigned int attrs)
{
	int ret;
	struct iommu_domain *domain;

	domain = gmu_ctx[ctx_id].domain;

	md->hostptr = dma_alloc_attrs(&gmu->pdev->dev, (size_t) md->size,
			&md->physaddr, GFP_KERNEL, 0);

	if (md->hostptr == NULL)
		return -ENOMEM;

	ret = iommu_map(domain, md->gmuaddr,
			md->physaddr, md->size,
			attrs);

	if (ret) {
		dev_err(&gmu->pdev->dev,
				"gmu map err: gaddr=0x%016llX, paddr=0x%016llX\n",
				md->gmuaddr, md->physaddr);
		free_gmu_mem(gmu, md);
	}

	return ret;
}

/*
 * allocate_gmu_image() - allocate and map memory for the GMU FW image. The
 * size comes from the loaded firmware file and must be smaller than the
 * code cache size; otherwise the firmware may suffer a performance penalty.
 * @gmu: Pointer to GMU device
 * @size: Requested allocation size
 */
int allocate_gmu_image(struct gmu_device *gmu, unsigned int size)
{
	struct gmu_memdesc *md = &gmu->fw_image;

	if (size > vma.cached_csize) {
		dev_err(&gmu->pdev->dev,
				"GMU firmware size too big: %d\n", size);
		return -EINVAL;
	}

	md->size = size;
	md->gmuaddr = vma.image_start;
	md->attr = GMU_CACHED_CODE;

	return alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, IOMMU_READ);
}

/*
 * allocate_gmu_kmem() - allocates and maps GMU kernel shared memory
 * @gmu: Pointer to GMU device
 * @size: Requested size
 * @attrs: IOMMU mapping attributes
 */
static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
		unsigned int size, unsigned int attrs)
{
	struct gmu_memdesc *md;
	int ret, entry_idx = find_first_zero_bit(
			&gmu_kmem_bitmap, GMU_KERNEL_ENTRIES);

	size = PAGE_ALIGN(size);

	if (size > SZ_1M || size == 0) {
		dev_err(&gmu->pdev->dev,
			"Requested %d bytes of GMU kernel memory, max=1MB\n",
			size);
		return ERR_PTR(-EINVAL);
	}

	if (entry_idx >= GMU_KERNEL_ENTRIES) {
		dev_err(&gmu->pdev->dev,
				"Ran out of GMU kernel mempool slots\n");
		return ERR_PTR(-EINVAL);
	}

	/* Allocate GMU virtual memory */
	md = &gmu_kmem_entries[entry_idx];
	md->gmuaddr = vma.noncached_kstart + (entry_idx * SZ_1M);
	set_bit(entry_idx, &gmu_kmem_bitmap);
	md->attr = GMU_NONCACHED_KERNEL;
	md->size = size;

	ret = alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, attrs);

	if (ret) {
		clear_bit(entry_idx, &gmu_kmem_bitmap);
		md->gmuaddr = 0;
		return ERR_PTR(ret);
	}

	return md;
}
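
/*
 * The consumers of this pool in this file are in gmu_memory_probe(), which
 * carves out the HFI queue buffer (HFIMEM_SIZE) and the GMU crash dump
 * buffer (DUMPMEM_SIZE), both mapped IOMMU_READ | IOMMU_WRITE for the GMU.
 */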

static int gmu_iommu_cb_probe(struct gmu_device *gmu,
		struct gmu_iommu_context *ctx,
		struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);
	struct device *dev;
	int ret;

	dev = &pdev->dev;

	ctx->dev = dev;
	ctx->domain = iommu_domain_alloc(&platform_bus_type);
	if (ctx->domain == NULL) {
		dev_err(&gmu->pdev->dev, "gmu iommu fail to alloc %s domain\n",
			ctx->name);
		return -ENODEV;
	}

	ret = iommu_attach_device(ctx->domain, dev);
	if (ret) {
		dev_err(&gmu->pdev->dev, "gmu iommu fail to attach %s device\n",
			ctx->name);
		iommu_domain_free(ctx->domain);
	}

	return ret;
}

static struct {
	const char *compatible;
	int index;
	iommu_fault_handler_t hdlr;
} cbs[] = {
	{ "qcom,smmu-gmu-user-cb",
		GMU_CONTEXT_USER,
		gmu_user_fault_handler,
	},
	{ "qcom,smmu-gmu-kernel-cb",
		GMU_CONTEXT_KERNEL,
		gmu_kernel_fault_handler,
	},
};

/*
 * gmu_iommu_init() - probe IOMMU context banks used by GMU
 * and attach GMU device
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */
int gmu_iommu_init(struct gmu_device *gmu, struct device_node *node)
{
	struct device_node *child;
	struct gmu_iommu_context *ctx = NULL;
	int ret, i;

	of_platform_populate(node, NULL, NULL, &gmu->pdev->dev);

	for (i = 0; i < ARRAY_SIZE(cbs); i++) {
		child = of_find_compatible_node(node, NULL, cbs[i].compatible);
		if (child) {
			ctx = &gmu_ctx[cbs[i].index];
			ret = gmu_iommu_cb_probe(gmu, ctx, child);
			if (ret)
				return ret;
			iommu_set_fault_handler(ctx->domain,
					cbs[i].hdlr, ctx);
		}
	}

	for (i = 0; i < ARRAY_SIZE(gmu_ctx); i++) {
		if (gmu_ctx[i].domain == NULL) {
			dev_err(&gmu->pdev->dev,
				"Missing GMU %s context bank node\n",
				gmu_ctx[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * gmu_kmem_close() - free all kernel memory allocated for GMU and detach GMU
 * from IOMMU context banks.
 * @gmu: Pointer to GMU device
 */
void gmu_kmem_close(struct gmu_device *gmu)
{
	int i;
	struct gmu_memdesc *md = &gmu->fw_image;
	struct gmu_iommu_context *ctx = &gmu_ctx[GMU_CONTEXT_KERNEL];

	/* Free GMU image memory */
	free_gmu_mem(gmu, md);

	/* Unmap image memory */
	iommu_unmap(ctx->domain,
			gmu->fw_image.gmuaddr,
			gmu->fw_image.size);

	gmu->hfi_mem = NULL;
	gmu->dump_mem = NULL;

	/* Unmap all memories in GMU kernel memory pool */
	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
		struct gmu_memdesc *memptr = &gmu_kmem_entries[i];

		if (memptr->gmuaddr)
			iommu_unmap(ctx->domain, memptr->gmuaddr, memptr->size);
	}

	/* Free GMU shared kernel memory */
	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
		md = &gmu_kmem_entries[i];
		free_gmu_mem(gmu, md);
		clear_bit(i, &gmu_kmem_bitmap);
	}

	/* Detach the device from SMMU context bank */
	iommu_detach_device(ctx->domain, ctx->dev);

	/* Free kernel mem context */
	iommu_domain_free(ctx->domain);
}

void gmu_memory_close(struct gmu_device *gmu)
{
	gmu_kmem_close(gmu);
	/* Free user memory context */
	iommu_domain_free(gmu_ctx[GMU_CONTEXT_USER].domain);
}

/*
 * gmu_memory_probe() - probe GMU IOMMU context banks and allocate memory
 * to share with GMU in kernel mode.
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */
int gmu_memory_probe(struct gmu_device *gmu, struct device_node *node)
{
	int ret;

	ret = gmu_iommu_init(gmu, node);
	if (ret)
		return ret;

	/* Allocates & maps memory for HFI */
	gmu->hfi_mem = allocate_gmu_kmem(gmu, HFIMEM_SIZE,
			(IOMMU_READ | IOMMU_WRITE));
	if (IS_ERR(gmu->hfi_mem)) {
		ret = PTR_ERR(gmu->hfi_mem);
		goto err_ret;
	}

	/* Allocates & maps GMU crash dump memory */
	gmu->dump_mem = allocate_gmu_kmem(gmu, DUMPMEM_SIZE,
			(IOMMU_READ | IOMMU_WRITE));
	if (IS_ERR(gmu->dump_mem)) {
		ret = PTR_ERR(gmu->dump_mem);
		goto err_ret;
	}

	return 0;
err_ret:
	gmu_memory_close(gmu);
	return ret;
}

/*
 * gmu_dcvs_set() - request GMU to change GPU frequency and/or bandwidth.
 * @gmu: Pointer to GMU device
 * @gpu_pwrlevel: index into the GPU DCVS table used by KGSL
 * @bus_level: index into the GPU bus table used by KGSL
 *
 * The function converts the GPU power level and bus level indices used by
 * KGSL into the indices used by the GMU/RPMh.
 */
int gmu_dcvs_set(struct gmu_device *gmu,
		unsigned int gpu_pwrlevel, unsigned int bus_level)
{
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int perf_idx = INVALID_DCVS_IDX, bw_idx = INVALID_DCVS_IDX;
	int ret;

	if (gpu_pwrlevel < gmu->num_gpupwrlevels - 1)
		perf_idx = gmu->num_gpupwrlevels - gpu_pwrlevel - 1;

	if (bus_level < gmu->num_bwlevels && bus_level > 0)
		bw_idx = bus_level;

	if ((perf_idx == INVALID_DCVS_IDX) &&
			(bw_idx == INVALID_DCVS_IDX))
		return -EINVAL;

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev,
				GMU_DCVS_NOHFI, perf_idx, bw_idx);

		if (ret) {
			dev_err_ratelimited(&gmu->pdev->dev,
				"Failed to set GPU perf idx %d, bw idx %d\n",
				perf_idx, bw_idx);

			adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
			adreno_dispatcher_schedule(device);
		}

		return ret;
	}

	return hfi_send_dcvs_vote(gmu, perf_idx, bw_idx, ACK_NONBLOCK);
}
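
/*
 * Note on the index conversion above: gmu_probe() fills gmu->gpu_freqs[] in
 * the reverse order of the KGSL pwrlevel table, so the KGSL level has to be
 * mirrored here: perf_idx = num_gpupwrlevels - gpu_pwrlevel - 1. For example,
 * with 5 power levels, KGSL level 1 maps to GMU performance index 3.
 */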

struct rpmh_arc_vals {
	unsigned int num;
	uint16_t val[MAX_GX_LEVELS];
};

static const char gfx_res_id[] = "gfx.lvl";
static const char cx_res_id[] = "cx.lvl";
static const char mx_res_id[] = "mx.lvl";

enum rpmh_vote_type {
	GPU_ARC_VOTE = 0,
	GMU_ARC_VOTE,
	INVALID_ARC_VOTE,
};

static const char debug_strs[][8] = {
	[GPU_ARC_VOTE] = "gpu",
	[GMU_ARC_VOTE] = "gmu",
};

/*
 * rpmh_arc_cmds() - query the RPMh command database for the GX/CX/MX rail
 * VLVL tables. The table indices are used by the GMU to vote for rail
 * voltages.
 * @gmu: Pointer to GMU device
 * @arc: Pointer to RPMh rail controller (ARC) voltage table
 * @res_id: Pointer to 8 char array that contains rail name
 */
static int rpmh_arc_cmds(struct gmu_device *gmu,
		struct rpmh_arc_vals *arc, const char *res_id)
{
	unsigned int len;

	len = cmd_db_get_aux_data_len(res_id);
	if (len == 0)
		return -EINVAL;

	if (len > (MAX_GX_LEVELS << 1)) {
		dev_err(&gmu->pdev->dev,
			"gfx cmddb size %d larger than alloc buf %d of %s\n",
			len, (MAX_GX_LEVELS << 1), res_id);
		return -EINVAL;
	}

	cmd_db_get_aux_data(res_id, (uint8_t *)arc->val, len);

	/*
	 * cmd_db_get_aux_data() gives us a zero-padded table of
	 * size len that contains the arc values. To determine the
	 * number of arc values, we loop through the table and count
	 * them until we get to the end of the buffer or hit the
	 * zero padding.
	 */
	for (arc->num = 1; arc->num < (len >> 1); arc->num++) {
		if (arc->val[arc->num - 1] >= arc->val[arc->num])
			break;
	}

	return 0;
}
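
/*
 * The aux data returned by cmd_db above is a list of 16-bit VLVL values, so
 * (len >> 1) is the number of table entries. Valid entries are strictly
 * increasing; the first entry that is not larger than its predecessor marks
 * the start of the zero padding, which is why the loop stops there and
 * arc->num ends up as the count of valid levels.
 */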

/*
 * setup_volt_dependency_tbl() - set up the GX->MX or CX->MX rail voltage
 * dependencies. The secondary rail voltage must be equal to or higher than
 * the primary rail voltage. The VLVL table index is used by RPMh for PMIC
 * voltage setting.
 * @votes: Pointer to a ARC vote descriptor
 * @pri_rail: Pointer to primary power rail VLVL table
 * @sec_rail: Pointer to second/dependent power rail VLVL table
 * @vlvl: Pointer to VLVL table being used by GPU or GMU driver, a subset
 *	of pri_rail VLVL table
 * @num_entries: Valid number of entries in table pointed by "vlvl" parameter
 */
static int setup_volt_dependency_tbl(struct arc_vote_desc *votes,
		struct rpmh_arc_vals *pri_rail, struct rpmh_arc_vals *sec_rail,
		unsigned int *vlvl, unsigned int num_entries)
{
	int i, j, k;
	uint16_t cur_vlvl;
	bool found_match;

	/* i tracks current KGSL GPU frequency table entry
	 * j tracks second rail voltage table entry
	 * k tracks primary rail voltage table entry
	 */
	for (i = 0; i < num_entries; i++) {
		found_match = false;

		/* Look for a primary rail voltage that matches a VLVL level */
		for (k = 0; k < pri_rail->num; k++) {
			if (pri_rail->val[k] == vlvl[i]) {
				votes[i].pri_idx = k;
				votes[i].vlvl = vlvl[i];
				cur_vlvl = vlvl[i];
				found_match = true;
				break;
			}
		}

		/* If we did not find a matching VLVL level then abort */
		if (!found_match)
			return -EINVAL;

		/*
		 * Look for a secondary rail index whose VLVL value
		 * is greater than or equal to the VLVL value of the
		 * corresponding index of the primary rail
		 */
		for (j = 0; j < sec_rail->num; j++) {
			if (sec_rail->val[j] >= cur_vlvl ||
					j + 1 == sec_rail->num) {
				votes[i].sec_idx = j;
				break;
			}
		}
	}
	return 0;
}
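
/*
 * Worked example (illustrative values only): if pri_rail->val = {16, 48, 64,
 * 128} and vlvl[i] = 64, the primary index recorded for entry i is 2. If
 * sec_rail->val = {0, 48, 128}, the first secondary level >= 64 is 128 at
 * index 2, so sec_idx = 2. If no secondary level is large enough, the last
 * secondary index is used because of the "j + 1 == sec_rail->num" check.
 */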

/*
 * rpmh_arc_votes_init() - initialize the RPMh votes needed for rail voltage
 * scaling by the GMU.
 * @gmu: Pointer to GMU device
 * @pri_rail: Pointer to primary power rail VLVL table
 * @sec_rail: Pointer to second/dependent power rail VLVL table
 * @type: the type of the primary rail, GPU or GMU
 */
static int rpmh_arc_votes_init(struct gmu_device *gmu,
		struct rpmh_arc_vals *pri_rail,
		struct rpmh_arc_vals *sec_rail,
		unsigned int type)
{
	struct device *dev;
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	unsigned int num_freqs;
	struct arc_vote_desc *votes;
	unsigned int vlvl_tbl[MAX_GX_LEVELS];
	unsigned int *freq_tbl;
	int i, ret;
	struct dev_pm_opp *opp;

	if (type == GPU_ARC_VOTE) {
		num_freqs = gmu->num_gpupwrlevels;
		votes = gmu->rpmh_votes.gx_votes;
		freq_tbl = gmu->gpu_freqs;
		dev = &device->pdev->dev;
	} else if (type == GMU_ARC_VOTE) {
		num_freqs = gmu->num_gmupwrlevels;
		votes = gmu->rpmh_votes.cx_votes;
		freq_tbl = gmu->gmu_freqs;
		dev = &gmu->pdev->dev;
	} else {
		return -EINVAL;
	}

	if (num_freqs > pri_rail->num) {
		dev_err(&gmu->pdev->dev,
			"%s defined more DCVS levels than RPMh can support\n",
			debug_strs[type]);
		return -EINVAL;
	}

	memset(vlvl_tbl, 0, sizeof(vlvl_tbl));
	for (i = 0; i < num_freqs; i++) {
		/* Hardcode VLVL for 0 because it is not registered in OPP */
		if (freq_tbl[i] == 0) {
			vlvl_tbl[i] = 0;
			continue;
		}

		/* Otherwise get the value from the OPP API */
		opp = dev_pm_opp_find_freq_exact(dev, freq_tbl[i], true);
		if (IS_ERR(opp)) {
			dev_err(&gmu->pdev->dev,
				"Failed to find opp freq %d of %s\n",
				freq_tbl[i], debug_strs[type]);
			return PTR_ERR(opp);
		}

		/* Values from OPP framework are offset by 1 */
		vlvl_tbl[i] = dev_pm_opp_get_voltage(opp) - 1;
	}

	ret = setup_volt_dependency_tbl(votes,
			pri_rail, sec_rail, vlvl_tbl, num_freqs);

	if (ret)
		dev_err(&gmu->pdev->dev, "%s rail volt failed to match DT freqs\n",
			debug_strs[type]);

	return ret;
}

/*
 * build_rpmh_bw_votes() - build TCS commands to vote for bandwidth.
 * Each command sets the frequency of a node along the path to DDR or CNOC.
 * @rpmh_vote: Pointer to RPMh vote needed by GMU to set BW via RPMh
 * @num_usecases: Number of BW use cases (or BW levels)
 * @handle: Provided by bus driver. It contains TCS command sets for
 *	all BW use cases of a bus client.
 */
static void build_rpmh_bw_votes(struct gmu_bw_votes *rpmh_vote,
		unsigned int num_usecases, struct msm_bus_tcs_handle handle)
{
	struct msm_bus_tcs_usecase *tmp;
	int i, j;

	for (i = 0; i < num_usecases; i++) {
		tmp = &handle.usecases[i];
		for (j = 0; j < tmp->num_cmds; j++) {
			if (!i) {
				/*
				 * The wait bitmask and TCS command addresses
				 * are the same for all bw use cases. To save
				 * the data volume exchanged between the
				 * driver and the GMU, only the bitmask and
				 * TCS command addresses of the first bw use
				 * case are transferred.
				 */
				rpmh_vote->cmds_per_bw_vote = tmp->num_cmds;
				rpmh_vote->cmds_wait_bitmask =
						tmp->cmds[j].complete ?
						rpmh_vote->cmds_wait_bitmask
						| BIT(i)
						: rpmh_vote->cmds_wait_bitmask
						& (~BIT(i));
				rpmh_vote->cmd_addrs[j] = tmp->cmds[j].addr;
			}
			rpmh_vote->cmd_data[i][j] = tmp->cmds[j].data;
		}
	}
}
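
/*
 * Resulting layout of struct gmu_bw_votes as filled above: cmds_per_bw_vote
 * and cmd_addrs[] are captured once from use case 0, while cmd_data[i][j]
 * holds the data word for command j of bandwidth level i. Per the comment
 * above, the intent is that only one copy of the command addresses needs to
 * be handed to the GMU, plus one row of data per bandwidth level.
 */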

/*
 * gmu_bus_vote_init - initialize the RPMh votes needed for bandwidth scaling
 * by the GMU.
 * @gmu: Pointer to GMU device
 * @pwr: Pointer to KGSL power controller
 */
static int gmu_bus_vote_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
{
	struct msm_bus_tcs_usecase *usecases;
	struct msm_bus_tcs_handle hdl;
	struct rpmh_votes_t *votes = &gmu->rpmh_votes;
	int ret;

	usecases = kcalloc(gmu->num_bwlevels, sizeof(*usecases), GFP_KERNEL);
	if (!usecases)
		return -ENOMEM;

	hdl.num_usecases = gmu->num_bwlevels;
	hdl.usecases = usecases;

	/*
	 * Query TCS command set for each use case defined in GPU b/w table
	 */
	ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->pcl);
	if (ret)
		goto out;

	build_rpmh_bw_votes(&votes->ddr_votes, gmu->num_bwlevels, hdl);

	/*
	 * Query CNOC TCS command set for each use case defined in cnoc bw table
	 */
	ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->ccl);
	if (ret)
		goto out;

	build_rpmh_bw_votes(&votes->cnoc_votes, gmu->num_cnocbwlevels, hdl);

out:
	/* Don't leak the usecase array on the error paths */
	kfree(usecases);

	return ret;
}

int gmu_rpmh_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
{
	struct rpmh_arc_vals gfx_arc, cx_arc, mx_arc;
	int ret;

	/* Populate BW vote table */
	ret = gmu_bus_vote_init(gmu, pwr);
	if (ret)
		return ret;

	/* Populate GPU and GMU frequency vote table */
	ret = rpmh_arc_cmds(gmu, &gfx_arc, gfx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_cmds(gmu, &cx_arc, cx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_cmds(gmu, &mx_arc, mx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_votes_init(gmu, &gfx_arc, &mx_arc, GPU_ARC_VOTE);
	if (ret)
		return ret;

	return rpmh_arc_votes_init(gmu, &cx_arc, &mx_arc, GMU_ARC_VOTE);
}

static irqreturn_t gmu_irq_handler(int irq, void *data)
{
	struct gmu_device *gmu = data;
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int status = 0;

	adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS, &status);
	adreno_write_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, status);

	/* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD WAKEUP interrupts */
	if (status & GMU_INT_WDOG_BITE) {
		dev_err_ratelimited(&gmu->pdev->dev,
				"GMU watchdog expired interrupt received\n");
		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
		adreno_dispatcher_schedule(device);
	}
	if (status & GMU_INT_HOST_AHB_BUS_ERR)
		dev_err_ratelimited(&gmu->pdev->dev,
				"AHB bus error interrupt received\n");
	if (status & GMU_INT_FENCE_ERR) {
		unsigned int fence_status;

		adreno_read_gmureg(ADRENO_DEVICE(device),
				ADRENO_REG_GMU_AHB_FENCE_STATUS, &fence_status);
		dev_err_ratelimited(&gmu->pdev->dev,
				"FENCE error interrupt received %x\n", fence_status);
	}

	if (status & ~GMU_AO_INT_MASK)
		dev_err_ratelimited(&gmu->pdev->dev,
				"Unhandled GMU interrupts 0x%x\n",
				status & ~GMU_AO_INT_MASK);

	return IRQ_HANDLED;
}

static irqreturn_t hfi_irq_handler(int irq, void *data)
{
	struct kgsl_hfi *hfi = data;
	struct gmu_device *gmu = container_of(hfi, struct gmu_device, hfi);
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int status = 0;

	adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status);
	adreno_write_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status);

	if (status & HFI_IRQ_MSGQ_MASK)
		tasklet_hi_schedule(&hfi->tasklet);
	if (status & HFI_IRQ_CM3_FAULT_MASK) {
		dev_err_ratelimited(&gmu->pdev->dev,
				"GMU CM3 fault interrupt received\n");
		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
		adreno_dispatcher_schedule(device);
	}
	if (status & ~HFI_IRQ_MASK)
		dev_err_ratelimited(&gmu->pdev->dev,
				"Unhandled HFI interrupts 0x%x\n",
				status & ~HFI_IRQ_MASK);

	return IRQ_HANDLED;
}

static int gmu_pwrlevel_probe(struct gmu_device *gmu, struct device_node *node)
{
	struct device_node *pwrlevel_node, *child;

	pwrlevel_node = of_find_node_by_name(node, "qcom,gmu-pwrlevels");

	if (pwrlevel_node == NULL) {
		dev_err(&gmu->pdev->dev, "Unable to find 'qcom,gmu-pwrlevels'\n");
		return -EINVAL;
	}

	gmu->num_gmupwrlevels = 0;

	for_each_child_of_node(pwrlevel_node, child) {
		unsigned int index;

		if (of_property_read_u32(child, "reg", &index))
			return -EINVAL;

		if (index >= MAX_CX_LEVELS) {
			dev_err(&gmu->pdev->dev, "gmu pwrlevel %d is out of range\n",
				index);
			continue;
		}

		if (index >= gmu->num_gmupwrlevels)
			gmu->num_gmupwrlevels = index + 1;

		if (of_property_read_u32(child, "qcom,gmu-freq",
				&gmu->gmu_freqs[index]))
			return -EINVAL;
	}

	return 0;
}
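
/*
 * Illustrative shape of the device tree node parsed above (node names and
 * frequencies are placeholders; the actual values are board specific):
 *
 *	qcom,gmu-pwrlevels {
 *		pwrlevel@0 {
 *			reg = <0>;
 *			qcom,gmu-freq = <0>;
 *		};
 *		pwrlevel@1 {
 *			reg = <1>;
 *			qcom,gmu-freq = <200000000>;
 *		};
 *	};
 *
 * Each child's "reg" is the table index and "qcom,gmu-freq" is the GMU clock
 * rate for that level, as passed to clk_set_rate() in gmu_enable_clks().
 */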

static int gmu_reg_probe(struct gmu_device *gmu, const char *name, bool is_gmu)
{
	struct resource *res;

	res = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM, name);
	if (res == NULL) {
		dev_err(&gmu->pdev->dev,
			"platform_get_resource %s failed\n", name);
		return -EINVAL;
	}

	if (res->start == 0 || resource_size(res) == 0) {
		dev_err(&gmu->pdev->dev,
			"dev %d %s invalid register region\n",
			gmu->pdev->dev.id, name);
		return -EINVAL;
	}

	if (is_gmu) {
		gmu->reg_phys = res->start;
		gmu->reg_len = resource_size(res);
		gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
				resource_size(res));

		if (gmu->reg_virt == NULL) {
			dev_err(&gmu->pdev->dev, "GMU regs ioremap failed\n");
			return -ENODEV;
		}

	} else {
		gmu->pdc_reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
				resource_size(res));
		if (gmu->pdc_reg_virt == NULL) {
			dev_err(&gmu->pdev->dev, "PDC regs ioremap failed\n");
			return -ENODEV;
		}
	}

	return 0;
}

static int gmu_clocks_probe(struct gmu_device *gmu, struct device_node *node)
{
	const char *cname;
	struct property *prop;
	struct clk *c;
	int i = 0;

	of_property_for_each_string(node, "clock-names", prop, cname) {
		c = devm_clk_get(&gmu->pdev->dev, cname);

		if (IS_ERR(c)) {
			dev_err(&gmu->pdev->dev,
				"dt: Couldn't get GMU clock: %s\n", cname);
			return PTR_ERR(c);
		}

		if (i >= MAX_GMU_CLKS) {
			dev_err(&gmu->pdev->dev,
				"dt: too many GMU clocks defined\n");
			return -EINVAL;
		}

		gmu->clks[i++] = c;
	}

	return 0;
}

static int gmu_gpu_bw_probe(struct gmu_device *gmu)
{
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct msm_bus_scale_pdata *bus_scale_table;

	bus_scale_table = msm_bus_cl_get_pdata(device->pdev);
	if (bus_scale_table == NULL) {
		dev_err(&gmu->pdev->dev, "dt: cannot get bus table\n");
		return -ENODEV;
	}

	gmu->num_bwlevels = bus_scale_table->num_usecases;
	gmu->pcl = msm_bus_scale_register_client(bus_scale_table);
	if (!gmu->pcl) {
		dev_err(&gmu->pdev->dev, "dt: cannot register bus client\n");
		return -ENODEV;
	}

	return 0;
}

static int gmu_cnoc_bw_probe(struct gmu_device *gmu)
{
	struct msm_bus_scale_pdata *cnoc_table;

	cnoc_table = msm_bus_cl_get_pdata(gmu->pdev);
	if (cnoc_table == NULL) {
		dev_err(&gmu->pdev->dev, "dt: cannot get cnoc table\n");
		return -ENODEV;
	}

	gmu->num_cnocbwlevels = cnoc_table->num_usecases;
	gmu->ccl = msm_bus_scale_register_client(cnoc_table);
	if (!gmu->ccl) {
		dev_err(&gmu->pdev->dev, "dt: cannot register cnoc client\n");
		return -ENODEV;
	}

	return 0;
}

static int gmu_regulators_probe(struct gmu_device *gmu,
		struct device_node *node)
{
	const char *name;
	struct property *prop;
	struct device *dev = &gmu->pdev->dev;
	int ret = 0;

	of_property_for_each_string(node, "regulator-names", prop, name) {
		if (!strcmp(name, "vddcx")) {
			gmu->cx_gdsc = devm_regulator_get(dev, name);
			if (IS_ERR(gmu->cx_gdsc)) {
				ret = PTR_ERR(gmu->cx_gdsc);
				dev_err(dev, "dt: GMU couldn't get CX gdsc\n");
				gmu->cx_gdsc = NULL;
				return ret;
			}
		} else if (!strcmp(name, "vdd")) {
			gmu->gx_gdsc = devm_regulator_get(dev, name);
			if (IS_ERR(gmu->gx_gdsc)) {
				ret = PTR_ERR(gmu->gx_gdsc);
				dev_err(dev, "dt: GMU couldn't get GX gdsc\n");
				gmu->gx_gdsc = NULL;
				return ret;
			}
		} else {
			dev_err(dev, "dt: Unknown GMU regulator: %s\n", name);
			return -ENODEV;
		}
	}

	return 0;
}

static int gmu_irq_probe(struct gmu_device *gmu)
{
	int ret;
	struct kgsl_hfi *hfi = &gmu->hfi;

	hfi->hfi_interrupt_num = platform_get_irq_byname(gmu->pdev,
			"kgsl_hfi_irq");
	ret = devm_request_irq(&gmu->pdev->dev,
			hfi->hfi_interrupt_num,
			hfi_irq_handler, IRQF_TRIGGER_HIGH,
			"HFI", hfi);
	if (ret) {
		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
				hfi->hfi_interrupt_num, ret);
		return ret;
	}

	gmu->gmu_interrupt_num = platform_get_irq_byname(gmu->pdev,
			"kgsl_gmu_irq");
	ret = devm_request_irq(&gmu->pdev->dev,
			gmu->gmu_interrupt_num,
			gmu_irq_handler, IRQF_TRIGGER_HIGH,
			"GMU", gmu);
	if (ret)
		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
				gmu->gmu_interrupt_num, ret);

	return ret;
}

static void gmu_irq_enable(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;

	/* Clear any pending IRQs before unmasking on GMU */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
			0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
			0xFFFFFFFF);

	/* Unmask needed IRQs on GMU */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
			(unsigned int) ~HFI_IRQ_MASK);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
			(unsigned int) ~GMU_AO_INT_MASK);

	/* Enable all IRQs on host */
	enable_irq(hfi->hfi_interrupt_num);
	enable_irq(gmu->gmu_interrupt_num);
}

static void gmu_irq_disable(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;

	/* Disable all IRQs on host */
	disable_irq(gmu->gmu_interrupt_num);
	disable_irq(hfi->hfi_interrupt_num);

	/* Mask all IRQs on GMU */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
			0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
			0xFFFFFFFF);

	/* Clear any pending IRQs before disabling */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
			0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
			0xFFFFFFFF);
}

/* Do not access any GMU registers in GMU probe function */
int gmu_probe(struct kgsl_device *device)
{
	struct device_node *node;
	struct gmu_device *gmu = &device->gmu;
	struct gmu_memdesc *mem_addr = NULL;
	struct kgsl_hfi *hfi = &gmu->hfi;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i = 0, ret = -ENXIO;

	node = of_find_compatible_node(device->pdev->dev.of_node,
			NULL, "qcom,gpu-gmu");

	if (node == NULL)
		return ret;

	device->gmu.pdev = of_find_device_by_node(node);

	/* Set up GMU regulators */
	ret = gmu_regulators_probe(gmu, node);
	if (ret)
		goto error;

	/* Set up GMU clocks */
	ret = gmu_clocks_probe(gmu, node);
	if (ret)
		goto error;

	/* Set up GMU IOMMU and shared memory with GMU */
	ret = gmu_memory_probe(&device->gmu, node);
	if (ret)
		goto error;
	mem_addr = gmu->hfi_mem;

	/* Map and reserve GMU CSR registers */
	ret = gmu_reg_probe(gmu, "kgsl_gmu_reg", true);
	if (ret)
		goto error;

	ret = gmu_reg_probe(gmu, "kgsl_gmu_pdc_reg", false);
	if (ret)
		goto error;

	gmu->gmu2gpu_offset = (gmu->reg_phys - device->reg_phys) >> 2;

	/* Initialize HFI and GMU interrupts */
	ret = gmu_irq_probe(gmu);
	if (ret)
		goto error;

	/* Don't enable GMU interrupts until GMU started */
	/* We cannot use gmu_irq_disable because it writes registers */
	disable_irq(gmu->gmu_interrupt_num);
	disable_irq(hfi->hfi_interrupt_num);

	tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long)gmu);
	INIT_LIST_HEAD(&hfi->msglist);
	spin_lock_init(&hfi->msglock);

	/* Retrieve GMU/GPU power level configurations */
	ret = gmu_pwrlevel_probe(gmu, node);
	if (ret)
		goto error;

	gmu->num_gpupwrlevels = pwr->num_pwrlevels;

	for (i = 0; i < gmu->num_gpupwrlevels; i++) {
		int j = gmu->num_gpupwrlevels - 1 - i;

		gmu->gpu_freqs[i] = pwr->pwrlevels[j].gpu_freq;
	}

	/* Initialize GPU b/w levels configuration */
	ret = gmu_gpu_bw_probe(gmu);
	if (ret)
		goto error;

	/* Initialize GMU CNOC b/w levels configuration */
	ret = gmu_cnoc_bw_probe(gmu);
	if (ret)
		goto error;

	/* Populate RPMh configurations */
	ret = gmu_rpmh_init(gmu, pwr);
	if (ret)
		goto error;

	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);

	/* Set up GMU idle states */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT))
		gmu->idle_level = GPU_HW_MIN_VOLT;
	else if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP))
		gmu->idle_level = GPU_HW_NAP;
	else if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
		gmu->idle_level = GPU_HW_IFPC;
	else if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
		gmu->idle_level = GPU_HW_SPTP_PC;
	else
		gmu->idle_level = GPU_HW_ACTIVE;

	/* Disable LM during boot time */
	clear_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag);
	return 0;

error:
	gmu_remove(device);
	return ret;
}

static int gmu_enable_clks(struct gmu_device *gmu)
{
	int ret, j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
		return -EINVAL;

	ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
	if (ret) {
		dev_err(&gmu->pdev->dev, "fail to set default GMU clk freq %d\n",
				gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
		return ret;
	}

	while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
		ret = clk_prepare_enable(gmu->clks[j]);
		if (ret) {
			dev_err(&gmu->pdev->dev,
					"fail to enable gpucc clk idx %d\n",
					j);
			return ret;
		}
		j++;
	}

	set_bit(GMU_CLK_ON, &gmu->flags);
	return 0;
}

static int gmu_disable_clks(struct gmu_device *gmu)
{
	int j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
		return 0;

	while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
		clk_disable_unprepare(gmu->clks[j]);
		j++;
	}

	clear_bit(GMU_CLK_ON, &gmu->flags);
	return 0;
}

static int gmu_enable_gdsc(struct gmu_device *gmu)
{
	int ret;

	if (IS_ERR_OR_NULL(gmu->cx_gdsc))
		return 0;

	ret = regulator_enable(gmu->cx_gdsc);
	if (ret)
		dev_err(&gmu->pdev->dev,
			"Failed to enable GMU CX gdsc, error %d\n", ret);

	return ret;
}

#define CX_GDSC_TIMEOUT	5000	/* ms */
static int gmu_disable_gdsc(struct gmu_device *gmu)
{
	int ret;
	unsigned long t;

	if (IS_ERR_OR_NULL(gmu->cx_gdsc))
		return 0;

	ret = regulator_disable(gmu->cx_gdsc);
	if (ret) {
		dev_err(&gmu->pdev->dev,
			"Failed to disable GMU CX gdsc, error %d\n", ret);
		return ret;
	}

	/*
	 * After the GX GDSC is turned off, the CX GDSC must also turn off.
	 * Voting it off from the GPU driver alone cannot guarantee that,
	 * so poll with a 5s timeout to make sure it really went down.
	 */
	t = jiffies + msecs_to_jiffies(CX_GDSC_TIMEOUT);
	do {
		if (!regulator_is_enabled(gmu->cx_gdsc))
			return 0;
		cond_resched();

	} while (!(time_after(jiffies, t)));

	dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout");
	return -ETIMEDOUT;
}

static int gmu_suspend(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;

	if (!test_bit(GMU_CLK_ON, &gmu->flags))
		return 0;

	/* Pending messages in all queues are abandoned */
	hfi_stop(gmu);
	clear_bit(GMU_HFI_ON, &gmu->flags);
	gmu_irq_disable(device);

	if (gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_SUSPEND, 0, 0))
		return -EINVAL;

	gmu_disable_clks(gmu);
	gmu_disable_gdsc(gmu);
	dev_err(&gmu->pdev->dev, "Suspended GMU\n");
	return 0;
}

static void gmu_snapshot(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;

	if (!gmu->fault_count) {
		/* Mask so there's no interrupt caused by NMI */
		adreno_write_gmureg(adreno_dev,
				ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);

		/* Make sure the interrupt is masked before causing it */
		wmb();
		adreno_write_gmureg(adreno_dev,
				ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
		adreno_write_gmureg(adreno_dev,
				ADRENO_REG_GMU_CM3_CFG, (1 << 9));

		/* Wait for the NMI to be handled */
		wmb();
		udelay(100);
		kgsl_device_snapshot(device, NULL, true);

		adreno_write_gmureg(adreno_dev,
				ADRENO_REG_GMU_GMU2HOST_INTR_CLR, 0xFFFFFFFF);
		adreno_write_gmureg(adreno_dev,
				ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
				(unsigned int) ~HFI_IRQ_MASK);
	}

	gmu->fault_count++;
}

static void gmu_change_gpu_pwrlevel(struct kgsl_device *device,
		unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	unsigned int old_level = pwr->active_pwrlevel;

	/*
	 * Update the level according to any thermal,
	 * max/min, or power constraints.
	 */
	new_level = kgsl_pwrctrl_adjust_pwrlevel(device, new_level);

	/*
	 * If thermal cycling is required and the new level hits the
	 * thermal limit, kick off the cycling.
	 */
	kgsl_pwrctrl_set_thermal_cycle(device, new_level);

	pwr->active_pwrlevel = new_level;
	pwr->previous_pwrlevel = old_level;

	/* Request adjusted DCVS level */
	kgsl_clk_set_rate(device, pwr->active_pwrlevel);
}

/* To be called to power on both GPU and GMU */
int gmu_start(struct kgsl_device *device)
{
	int ret = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;

	switch (device->state) {
	case KGSL_STATE_INIT:
	case KGSL_STATE_SUSPEND:
		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
		gmu_enable_gdsc(gmu);
		gmu_enable_clks(gmu);
		gmu_irq_enable(device);

		/* Vote for 300MHz DDR for GMU to init */
		ret = msm_bus_scale_client_update_request(gmu->pcl,
				pwr->pwrlevels[pwr->default_pwrlevel].bus_freq);
		if (ret)
			dev_err(&gmu->pdev->dev,
				"Failed to allocate gmu b/w: %d\n", ret);

		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
				GMU_COLD_BOOT, 0);
		if (ret)
			goto error_gmu;

		ret = hfi_start(gmu, GMU_COLD_BOOT);
		if (ret)
			goto error_gmu;

		/* Request default DCVS level */
		gmu_change_gpu_pwrlevel(device, pwr->default_pwrlevel);
		msm_bus_scale_client_update_request(gmu->pcl, 0);
		break;

	case KGSL_STATE_SLUMBER:
		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
		gmu_enable_gdsc(gmu);
		gmu_enable_clks(gmu);
		gmu_irq_enable(device);

		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
				GMU_WARM_BOOT, 0);
		if (ret)
			goto error_gmu;

		ret = hfi_start(gmu, GMU_WARM_BOOT);
		if (ret)
			goto error_gmu;

		gmu_change_gpu_pwrlevel(device, pwr->default_pwrlevel);
		break;

	case KGSL_STATE_RESET:
		if (test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv) ||
				test_bit(GMU_FAULT, &gmu->flags)) {
			gmu_suspend(device);
			gmu_enable_gdsc(gmu);
			gmu_enable_clks(gmu);
			gmu_irq_enable(device);

			ret = gpudev->rpmh_gpu_pwrctrl(
					adreno_dev, GMU_FW_START, GMU_RESET, 0);
			if (ret)
				goto error_gmu;

			ret = hfi_start(gmu, GMU_COLD_BOOT);
			if (ret)
				goto error_gmu;

			/* Send DCVS level prior to reset */
			gmu_change_gpu_pwrlevel(device,
					pwr->default_pwrlevel);
		} else {
			/* GMU fast boot */
			hfi_stop(gmu);

			ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
					GMU_RESET, 0);
			if (ret)
				goto error_gmu;

			ret = hfi_start(gmu, GMU_WARM_BOOT);
			if (ret)
				goto error_gmu;
		}
		break;
	default:
		break;
	}

	return ret;

error_gmu:
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
		gpudev->oob_clear(adreno_dev,
				OOB_BOOT_SLUMBER_CLEAR_MASK);
	gmu_snapshot(device);
	return ret;
}

/* Caller shall ensure GPU is ready for SLUMBER */
void gmu_stop(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int ret = 0;

	if (!test_bit(GMU_CLK_ON, &gmu->flags))
		return;

	/* Wait for the lowest idle level we requested */
	if (gpudev->wait_for_lowest_idle &&
			gpudev->wait_for_lowest_idle(adreno_dev))
		goto error;

	ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
	if (ret)
		goto error;

	if (gpudev->wait_for_gmu_idle &&
			gpudev->wait_for_gmu_idle(adreno_dev))
		goto error;

	/* Pending messages in all queues are abandoned */
	hfi_stop(gmu);
	clear_bit(GMU_HFI_ON, &gmu->flags);
	gmu_irq_disable(device);

	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
	gmu_disable_clks(gmu);
	gmu_disable_gdsc(gmu);

	msm_bus_scale_client_update_request(gmu->pcl, 0);
	return;

error:
	/*
	 * The power controller will change state to SLUMBER anyway.
	 * Set the GMU_FAULT flag to tell the power controller that
	 * hang recovery is needed before the GPU is powered on again.
	 */
	set_bit(GMU_FAULT, &gmu->flags);
	dev_err(&gmu->pdev->dev, "Failed to stop GMU\n");
	gmu_snapshot(device);
}

void gmu_remove(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;
	int i = 0;

	if (!device->gmu.pdev)
		return;

	tasklet_kill(&hfi->tasklet);

	gmu_stop(device);
	gmu_irq_disable(device);

	while ((i < MAX_GMU_CLKS) && gmu->clks[i]) {
		gmu->clks[i] = NULL;
		i++;
	}

	if (gmu->gmu_interrupt_num) {
		devm_free_irq(&gmu->pdev->dev,
				gmu->gmu_interrupt_num, gmu);
		gmu->gmu_interrupt_num = 0;
	}

	if (hfi->hfi_interrupt_num) {
		devm_free_irq(&gmu->pdev->dev,
				hfi->hfi_interrupt_num, hfi);
		hfi->hfi_interrupt_num = 0;
	}

	if (gmu->ccl) {
		msm_bus_scale_unregister_client(gmu->ccl);
		gmu->ccl = 0;
	}

	if (gmu->pcl) {
		msm_bus_scale_unregister_client(gmu->pcl);
		gmu->pcl = 0;
	}

	if (gmu->pdc_reg_virt) {
		devm_iounmap(&gmu->pdev->dev, gmu->pdc_reg_virt);
		gmu->pdc_reg_virt = NULL;
	}

	if (gmu->reg_virt) {
		devm_iounmap(&gmu->pdev->dev, gmu->reg_virt);
		gmu->reg_virt = NULL;
	}

	if (gmu->hfi_mem || gmu->dump_mem)
		gmu_memory_close(&device->gmu);

	for (i = 0; i < MAX_GMU_CLKS; i++) {
		if (gmu->clks[i]) {
			devm_clk_put(&gmu->pdev->dev, gmu->clks[i]);
			gmu->clks[i] = NULL;
		}
	}

	if (gmu->gx_gdsc) {
		devm_regulator_put(gmu->gx_gdsc);
		gmu->gx_gdsc = NULL;
	}

	if (gmu->cx_gdsc) {
		devm_regulator_put(gmu->cx_gdsc);
		gmu->cx_gdsc = NULL;
	}

	device->gmu.pdev = NULL;
}