/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>

#include "kgsl_device.h"
#include "kgsl_gmu.h"
#include "kgsl_hfi.h"
#include "a6xx_reg.h"
#include "adreno.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl_gmu."

static bool nogmu;
module_param(nogmu, bool, 0444);
MODULE_PARM_DESC(nogmu, "Disable the GMU");

#define GMU_CONTEXT_USER	0
#define GMU_CONTEXT_KERNEL	1
#define GMU_KERNEL_ENTRIES	8

enum gmu_iommu_mem_type {
	GMU_CACHED_CODE,
	GMU_CACHED_DATA,
	GMU_NONCACHED_KERNEL,
	GMU_NONCACHED_USER
};

/*
 * GMU virtual memory mapping definitions
 */
struct gmu_vma {
	unsigned int noncached_ustart;
	unsigned int noncached_usize;
	unsigned int noncached_kstart;
	unsigned int noncached_ksize;
	unsigned int cached_dstart;
	unsigned int cached_dsize;
	unsigned int cached_cstart;
	unsigned int cached_csize;
	unsigned int image_start;
};

static void gmu_snapshot(struct kgsl_device *device);

struct gmu_iommu_context {
	const char *name;
	struct device *dev;
	struct iommu_domain *domain;
};

#define HFIMEM_SIZE SZ_16K

#define DUMPMEM_SIZE SZ_16K

/* Define target specific GMU VMA configurations */
static const struct gmu_vma vma = {
	/* Noncached user segment */
	0x80000000, SZ_1G,
	/* Noncached kernel segment */
	0x60000000, SZ_512M,
	/* Cached data segment */
	0x44000, (SZ_256K - SZ_16K),
	/* Cached code segment */
	0x0, (SZ_256K - SZ_16K),
	/* FW image */
	0x0,
};

struct gmu_iommu_context gmu_ctx[] = {
	[GMU_CONTEXT_USER] = { .name = "gmu_user" },
	[GMU_CONTEXT_KERNEL] = { .name = "gmu_kernel" }
};

/*
 * There are a few static memory buffers that are allocated and mapped at boot
 * time for the GMU to function. The buffers are permanent (not freed) after
 * GPU boot. The size of each buffer is constant and not expected to change.
 *
 * We define an array and a simple allocator to keep track of the currently
 * active SMMU entries of the GMU kernel mode context. Each entry is assigned
 * a unique address inside the GMU kernel mode address range. The addresses
 * are assigned sequentially and aligned to 1MB each.
 */
static struct gmu_memdesc gmu_kmem_entries[GMU_KERNEL_ENTRIES];
static unsigned long gmu_kmem_bitmap;

/*
 * kgsl_gmu_isenabled() - Check if there is a GMU and it is enabled
 * @device: Pointer to the KGSL device that owns the GMU
 *
 * Check if a GMU has been found and successfully probed. Also
 * check that the feature flag to use a GMU is enabled. Returns
 * true if both of these conditions are met, otherwise false.
 */
bool kgsl_gmu_isenabled(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!nogmu && gmu->pdev &&
			ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
		return true;
	return false;
}

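/*
 * _gmu_iommu_fault_handler() - common helper for the GMU context bank fault
 * handlers. Logs the faulting address, context bank name, access direction
 * and fault type.
 */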
static int _gmu_iommu_fault_handler(struct device *dev,
		unsigned long addr, int flags, const char *name)
{
	char *fault_type = "unknown";

	if (flags & IOMMU_FAULT_TRANSLATION)
		fault_type = "translation";
	else if (flags & IOMMU_FAULT_PERMISSION)
		fault_type = "permission";

	dev_err(dev, "GMU fault addr = %lX, context=%s (%s %s fault)\n",
			addr, name,
			(flags & IOMMU_FAULT_WRITE) ? "write" : "read",
			fault_type);

	return 0;
}

static int gmu_kernel_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token)
{
	return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_kernel");
}

static int gmu_user_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token)
{
	return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_user");
}

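/*
 * free_gmu_mem() - free a DMA buffer previously allocated for the GMU and
 * clear its memory descriptor.
 * @gmu: Pointer to GMU device
 * @md: Pointer to the GMU memory descriptor
 */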
static void free_gmu_mem(struct gmu_device *gmu,
		struct gmu_memdesc *md)
{
	/* Free GMU image memory */
	if (md->hostptr)
		dma_free_attrs(&gmu->pdev->dev, (size_t) md->size,
				(void *)md->hostptr, md->physaddr, 0);
	memset(md, 0, sizeof(*md));
}

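/*
 * alloc_and_map() - allocate a DMA buffer for the GMU and map it into the
 * requested IOMMU context bank at the GMU virtual address recorded in the
 * memory descriptor. The buffer is freed again if the mapping fails.
 * @gmu: Pointer to GMU device
 * @ctx_id: IOMMU context bank index (user or kernel)
 * @md: Pointer to the GMU memory descriptor
 * @attrs: IOMMU mapping attributes
 */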
static int alloc_and_map(struct gmu_device *gmu, unsigned int ctx_id,
		struct gmu_memdesc *md, unsigned int attrs)
{
	int ret;
	struct iommu_domain *domain;

	domain = gmu_ctx[ctx_id].domain;

	md->hostptr = dma_alloc_attrs(&gmu->pdev->dev, (size_t) md->size,
			&md->physaddr, GFP_KERNEL, 0);

	if (md->hostptr == NULL)
		return -ENOMEM;

	ret = iommu_map(domain, md->gmuaddr,
			md->physaddr, md->size,
			attrs);

	if (ret) {
		dev_err(&gmu->pdev->dev,
			"gmu map err: gaddr=0x%016llX, paddr=0x%016llX\n",
			md->gmuaddr, md->physaddr);
		free_gmu_mem(gmu, md);
	}

	return ret;
}

/*
 * allocate_gmu_image() - allocates & maps memory for the FW image. The size
 * comes from the loaded firmware file and must be less than the code cache
 * size, otherwise the FW may experience performance issues.
 * @gmu: Pointer to GMU device
 * @size: Requested allocation size
 */
int allocate_gmu_image(struct gmu_device *gmu, unsigned int size)
{
	struct gmu_memdesc *md = &gmu->fw_image;

	if (size > vma.cached_csize) {
		dev_err(&gmu->pdev->dev,
			"GMU firmware size too big: %d\n", size);
		return -EINVAL;
	}

	md->size = size;
	md->gmuaddr = vma.image_start;
	md->attr = GMU_CACHED_CODE;

	return alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, IOMMU_READ);
}

/*
 * allocate_gmu_kmem() - allocates and maps GMU kernel shared memory
 * @gmu: Pointer to GMU device
 * @size: Requested size
 * @attrs: IOMMU mapping attributes
 */
static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
		unsigned int size, unsigned int attrs)
{
	struct gmu_memdesc *md;
	int ret, entry_idx = find_first_zero_bit(
			&gmu_kmem_bitmap, GMU_KERNEL_ENTRIES);

	size = PAGE_ALIGN(size);

	if (size > SZ_1M || size == 0) {
		dev_err(&gmu->pdev->dev,
			"Requested %d bytes of GMU kernel memory, max=1MB\n",
			size);
		return ERR_PTR(-EINVAL);
	}

	if (entry_idx >= GMU_KERNEL_ENTRIES) {
		dev_err(&gmu->pdev->dev,
			"Ran out of GMU kernel mempool slots\n");
		return ERR_PTR(-EINVAL);
	}

	/* Allocate GMU virtual memory */
	md = &gmu_kmem_entries[entry_idx];
	md->gmuaddr = vma.noncached_kstart + (entry_idx * SZ_1M);
	set_bit(entry_idx, &gmu_kmem_bitmap);
	md->attr = GMU_NONCACHED_KERNEL;
	md->size = size;

	ret = alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, attrs);

	if (ret) {
		clear_bit(entry_idx, &gmu_kmem_bitmap);
		md->gmuaddr = 0;
		return ERR_PTR(ret);
	}

	return md;
}

static int gmu_iommu_cb_probe(struct gmu_device *gmu,
		struct gmu_iommu_context *ctx,
		struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);
	struct device *dev;
	int ret;

	dev = &pdev->dev;

	ctx->dev = dev;
	ctx->domain = iommu_domain_alloc(&platform_bus_type);
	if (ctx->domain == NULL) {
		dev_err(&gmu->pdev->dev, "gmu iommu fail to alloc %s domain\n",
			ctx->name);
		return -ENODEV;
	}

	ret = iommu_attach_device(ctx->domain, dev);
	if (ret) {
		dev_err(&gmu->pdev->dev, "gmu iommu fail to attach %s device\n",
			ctx->name);
		iommu_domain_free(ctx->domain);
	}

	return ret;
}

static struct {
	const char *compatible;
	int index;
	iommu_fault_handler_t hdlr;
} cbs[] = {
	{ "qcom,smmu-gmu-user-cb",
		GMU_CONTEXT_USER,
		gmu_user_fault_handler,
	},
	{ "qcom,smmu-gmu-kernel-cb",
		GMU_CONTEXT_KERNEL,
		gmu_kernel_fault_handler,
	},
};

/*
 * gmu_iommu_init() - probe IOMMU context banks used by GMU
 * and attach GMU device
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */
int gmu_iommu_init(struct gmu_device *gmu, struct device_node *node)
{
	struct device_node *child;
	struct gmu_iommu_context *ctx = NULL;
	int ret, i;

	of_platform_populate(node, NULL, NULL, &gmu->pdev->dev);

	for (i = 0; i < ARRAY_SIZE(cbs); i++) {
		child = of_find_compatible_node(node, NULL, cbs[i].compatible);
		if (child) {
			ctx = &gmu_ctx[cbs[i].index];
			ret = gmu_iommu_cb_probe(gmu, ctx, child);
			if (ret)
				return ret;
			iommu_set_fault_handler(ctx->domain,
					cbs[i].hdlr, ctx);
		}
	}

	for (i = 0; i < ARRAY_SIZE(gmu_ctx); i++) {
		if (gmu_ctx[i].domain == NULL) {
			dev_err(&gmu->pdev->dev,
				"Missing GMU %s context bank node\n",
				gmu_ctx[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * gmu_kmem_close() - free all kernel memory allocated for GMU and detach GMU
 * from IOMMU context banks.
 * @gmu: Pointer to GMU device
 */
void gmu_kmem_close(struct gmu_device *gmu)
{
	int i;
	struct gmu_memdesc *md = &gmu->fw_image;
	struct gmu_iommu_context *ctx = &gmu_ctx[GMU_CONTEXT_KERNEL];

	/* Free GMU image memory */
	free_gmu_mem(gmu, md);

	/* Unmap image memory */
	iommu_unmap(ctx->domain,
			gmu->fw_image.gmuaddr,
			gmu->fw_image.size);

	gmu->hfi_mem = NULL;
	gmu->dump_mem = NULL;

	/* Unmap all memories in GMU kernel memory pool */
	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
		struct gmu_memdesc *memptr = &gmu_kmem_entries[i];

		if (memptr->gmuaddr)
			iommu_unmap(ctx->domain, memptr->gmuaddr, memptr->size);
	}

	/* Free GMU shared kernel memory */
	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
		md = &gmu_kmem_entries[i];
		free_gmu_mem(gmu, md);
		clear_bit(i, &gmu_kmem_bitmap);
	}

	/* Detach the device from SMMU context bank */
	iommu_detach_device(ctx->domain, ctx->dev);

	/* Free kernel mem context */
	iommu_domain_free(ctx->domain);
}

void gmu_memory_close(struct gmu_device *gmu)
{
	gmu_kmem_close(gmu);
	/* Free user memory context */
	iommu_domain_free(gmu_ctx[GMU_CONTEXT_USER].domain);
}

/*
 * gmu_memory_probe() - probe GMU IOMMU context banks and allocate memory
 * to share with GMU in kernel mode.
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */
int gmu_memory_probe(struct gmu_device *gmu, struct device_node *node)
{
	int ret;

	ret = gmu_iommu_init(gmu, node);
	if (ret)
		return ret;

	/* Allocates & maps memory for HFI */
	gmu->hfi_mem = allocate_gmu_kmem(gmu, HFIMEM_SIZE,
			(IOMMU_READ | IOMMU_WRITE));
	if (IS_ERR(gmu->hfi_mem)) {
		ret = PTR_ERR(gmu->hfi_mem);
		goto err_ret;
	}

	/* Allocates & maps GMU crash dump memory */
	gmu->dump_mem = allocate_gmu_kmem(gmu, DUMPMEM_SIZE,
			(IOMMU_READ | IOMMU_WRITE));
	if (IS_ERR(gmu->dump_mem)) {
		ret = PTR_ERR(gmu->dump_mem);
		goto err_ret;
	}

	return 0;
err_ret:
	gmu_memory_close(gmu);
	return ret;
}

/*
 * gmu_dcvs_set() - request GMU to change GPU frequency and/or bandwidth.
 * @gmu: Pointer to GMU device
 * @gpu_pwrlevel: index to GPU DCVS table used by KGSL
 * @bus_level: index to GPU bus table used by KGSL
 *
 * The function converts the GPU power level and bus level indices used by
 * KGSL to the indices used by GMU/RPMh.
 */
int gmu_dcvs_set(struct gmu_device *gmu,
		unsigned int gpu_pwrlevel, unsigned int bus_level)
{
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int perf_idx = INVALID_DCVS_IDX, bw_idx = INVALID_DCVS_IDX;
	int ret;

	if (gpu_pwrlevel < gmu->num_gpupwrlevels - 1)
		perf_idx = gmu->num_gpupwrlevels - gpu_pwrlevel - 1;

	if (bus_level < gmu->num_bwlevels && bus_level > 0)
		bw_idx = bus_level;

	if ((perf_idx == INVALID_DCVS_IDX) &&
			(bw_idx == INVALID_DCVS_IDX))
		return -EINVAL;

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev,
				GMU_DCVS_NOHFI, perf_idx, bw_idx);

		if (ret) {
			dev_err_ratelimited(&gmu->pdev->dev,
				"Failed to set GPU perf idx %d, bw idx %d\n",
				perf_idx, bw_idx);

			adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
			adreno_dispatcher_schedule(device);
		}

		return ret;
	}

	return hfi_send_dcvs_vote(gmu, perf_idx, bw_idx, ACK_NONBLOCK);
}

struct rpmh_arc_vals {
	unsigned int num;
	uint16_t val[MAX_GX_LEVELS];
};

static const char gfx_res_id[] = "gfx.lvl";
static const char cx_res_id[] = "cx.lvl";
static const char mx_res_id[] = "mx.lvl";

enum rpmh_vote_type {
	GPU_ARC_VOTE = 0,
	GMU_ARC_VOTE,
	INVALID_ARC_VOTE,
};

static const char debug_strs[][8] = {
	[GPU_ARC_VOTE] = "gpu",
	[GMU_ARC_VOTE] = "gmu",
};

/*
 * rpmh_arc_cmds() - query the RPMh command database for the GX/CX/MX rail
 * VLVL tables. The table index is used by GMU to vote for rail voltage.
 *
 * @gmu: Pointer to GMU device
 * @arc: Pointer to RPMh rail controller (ARC) voltage table
 * @res_id: Pointer to 8 char array that contains rail name
 */
static int rpmh_arc_cmds(struct gmu_device *gmu,
		struct rpmh_arc_vals *arc, const char *res_id)
{
	unsigned int len;

	len = cmd_db_get_aux_data_len(res_id);
	if (len == 0)
		return -EINVAL;

	if (len > (MAX_GX_LEVELS << 1)) {
		dev_err(&gmu->pdev->dev,
			"gfx cmddb size %d larger than alloc buf %d of %s\n",
			len, (MAX_GX_LEVELS << 1), res_id);
		return -EINVAL;
	}

	cmd_db_get_aux_data(res_id, (uint8_t *)arc->val, len);

	/*
	 * cmd_db_get_aux_data() gives us a zero-padded table of
	 * size len that contains the arc values. To determine the
	 * number of arc values, we loop through the table and count
	 * them until we get to the end of the buffer or hit the
	 * zero padding.
	 */
	for (arc->num = 1; arc->num < (len >> 1); arc->num++) {
		if (arc->val[arc->num - 1] >= arc->val[arc->num])
			break;
	}

	return 0;
}

/*
 * setup_volt_dependency_tbl() - set up GX->MX or CX->MX rail voltage
 * dependencies. The second rail voltage shall be equal to or higher than
 * the primary rail voltage. The VLVL table index is used by RPMh for PMIC
 * voltage setting.
 * @votes: Pointer to an ARC vote descriptor
 * @pri_rail: Pointer to primary power rail VLVL table
 * @sec_rail: Pointer to second/dependent power rail VLVL table
 * @vlvl: Pointer to VLVL table being used by GPU or GMU driver, a subset
 *	of pri_rail VLVL table
 * @num_entries: Valid number of entries in table pointed by "vlvl" parameter
 */
static int setup_volt_dependency_tbl(struct arc_vote_desc *votes,
		struct rpmh_arc_vals *pri_rail, struct rpmh_arc_vals *sec_rail,
		unsigned int *vlvl, unsigned int num_entries)
{
	int i, j, k;
	uint16_t cur_vlvl;
	bool found_match;

	/* i tracks current KGSL GPU frequency table entry
	 * j tracks second rail voltage table entry
	 * k tracks primary rail voltage table entry
	 */
	for (i = 0; i < num_entries; i++) {
		found_match = false;

		/* Look for a primary rail voltage that matches a VLVL level */
		for (k = 0; k < pri_rail->num; k++) {
			if (pri_rail->val[k] == vlvl[i]) {
				votes[i].pri_idx = k;
				votes[i].vlvl = vlvl[i];
				cur_vlvl = vlvl[i];
				found_match = true;
				break;
			}
		}

		/* If we did not find a matching VLVL level then abort */
		if (!found_match)
			return -EINVAL;

		/*
		 * Look for a secondary rail index whose VLVL value
		 * is greater than or equal to the VLVL value of the
		 * corresponding index of the primary rail
		 */
		for (j = 0; j < sec_rail->num; j++) {
			if (sec_rail->val[j] >= cur_vlvl ||
					j + 1 == sec_rail->num) {
				votes[i].sec_idx = j;
				break;
			}
		}
	}
	return 0;
}

/*
 * rpmh_arc_votes_init() - initialize the RPMh votes needed for rail voltage
 * scaling by GMU.
 * @gmu: Pointer to GMU device
 * @pri_rail: Pointer to primary power rail VLVL table
 * @sec_rail: Pointer to second/dependent power rail VLVL table
 *	of pri_rail VLVL table
 * @type: the type of the primary rail, GPU or GMU
 */
static int rpmh_arc_votes_init(struct gmu_device *gmu,
		struct rpmh_arc_vals *pri_rail,
		struct rpmh_arc_vals *sec_rail,
		unsigned int type)
{
	struct device *dev;
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	unsigned int num_freqs;
	struct arc_vote_desc *votes;
	unsigned int vlvl_tbl[MAX_GX_LEVELS];
	unsigned int *freq_tbl;
	int i, ret;
	struct dev_pm_opp *opp;

	if (type == GPU_ARC_VOTE) {
		num_freqs = gmu->num_gpupwrlevels;
		votes = gmu->rpmh_votes.gx_votes;
		freq_tbl = gmu->gpu_freqs;
		dev = &device->pdev->dev;
	} else if (type == GMU_ARC_VOTE) {
		num_freqs = gmu->num_gmupwrlevels;
		votes = gmu->rpmh_votes.cx_votes;
		freq_tbl = gmu->gmu_freqs;
		dev = &gmu->pdev->dev;
	} else {
		return -EINVAL;
	}

	if (num_freqs > pri_rail->num) {
		dev_err(&gmu->pdev->dev,
			"%s defined more DCVS levels than RPMh can support\n",
			debug_strs[type]);
		return -EINVAL;
	}

	memset(vlvl_tbl, 0, sizeof(vlvl_tbl));
	for (i = 0; i < num_freqs; i++) {
		/* Hardcode VLVL for 0 because it is not registered in OPP */
		if (freq_tbl[i] == 0) {
			vlvl_tbl[i] = 0;
			continue;
		}

		/* Otherwise get the value from the OPP API */
		opp = dev_pm_opp_find_freq_exact(dev, freq_tbl[i], true);
		if (IS_ERR(opp)) {
			dev_err(&gmu->pdev->dev,
				"Failed to find opp freq %d of %s\n",
				freq_tbl[i], debug_strs[type]);
			return PTR_ERR(opp);
		}

		/* Values from OPP framework are offset by 1 */
		vlvl_tbl[i] = dev_pm_opp_get_voltage(opp) - 1;
	}

	ret = setup_volt_dependency_tbl(votes,
			pri_rail, sec_rail, vlvl_tbl, num_freqs);

	if (ret)
		dev_err(&gmu->pdev->dev, "%s rail volt failed to match DT freqs\n",
			debug_strs[type]);

	return ret;
}

/*
 * build_rpmh_bw_votes() - build TCS commands to vote for bandwidth.
 * Each command sets frequency of a node along path to DDR or CNOC.
 * @rpmh_vote: Pointer to RPMh vote needed by GMU to set BW via RPMh
 * @num_usecases: Number of BW use cases (or BW levels)
 * @handle: Provided by bus driver. It contains TCS command sets for
 * all BW use cases of a bus client.
 */
static void build_rpmh_bw_votes(struct gmu_bw_votes *rpmh_vote,
		unsigned int num_usecases, struct msm_bus_tcs_handle handle)
{
	struct msm_bus_tcs_usecase *tmp;
	int i, j;

	for (i = 0; i < num_usecases; i++) {
		tmp = &handle.usecases[i];
		for (j = 0; j < tmp->num_cmds; j++) {
			if (!i) {
				/*
				 * Wait bitmask and TCS command addresses are
				 * the same for all bw use cases. To save data
				 * volume exchanged between driver and GMU,
				 * only transfer bitmasks and TCS command
				 * addresses of the first bw use case.
				 */
				rpmh_vote->cmds_per_bw_vote = tmp->num_cmds;
				rpmh_vote->cmds_wait_bitmask =
						tmp->cmds[j].complete ?
						rpmh_vote->cmds_wait_bitmask
						| BIT(i)
						: rpmh_vote->cmds_wait_bitmask
						& (~BIT(i));
				rpmh_vote->cmd_addrs[j] = tmp->cmds[j].addr;
			}
			rpmh_vote->cmd_data[i][j] = tmp->cmds[j].data;
		}
	}
}

/*
 * gmu_bus_vote_init - initialize the RPMh votes needed for bw scaling by GMU.
 * @gmu: Pointer to GMU device
 * @pwr: Pointer to KGSL power controller
 */
static int gmu_bus_vote_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
{
	struct msm_bus_tcs_usecase *usecases;
	struct msm_bus_tcs_handle hdl;
	struct rpmh_votes_t *votes = &gmu->rpmh_votes;
	int ret;

	usecases = kcalloc(gmu->num_bwlevels, sizeof(*usecases), GFP_KERNEL);
	if (!usecases)
		return -ENOMEM;

	hdl.num_usecases = gmu->num_bwlevels;
	hdl.usecases = usecases;

	/*
	 * Query TCS command set for each use case defined in GPU b/w table
	 */
	ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->pcl);
	if (ret)
		goto out;

	build_rpmh_bw_votes(&votes->ddr_votes, gmu->num_bwlevels, hdl);

	/*
	 * Query CNOC TCS command set for each use case defined in cnoc bw table
	 */
	ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->ccl);
	if (ret)
		goto out;

	build_rpmh_bw_votes(&votes->cnoc_votes, gmu->num_cnocbwlevels, hdl);

out:
	kfree(usecases);

	return ret;
}

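/*
 * gmu_rpmh_init() - populate the RPMh votes the GMU needs: bus bandwidth
 * votes plus the GPU and GMU ARC voltage votes derived from the GX/CX/MX
 * rail tables in the command DB.
 * @gmu: Pointer to GMU device
 * @pwr: Pointer to KGSL power controller
 */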
int gmu_rpmh_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
{
	struct rpmh_arc_vals gfx_arc, cx_arc, mx_arc;
	int ret;

	/* Populate BW vote table */
	ret = gmu_bus_vote_init(gmu, pwr);
	if (ret)
		return ret;

	/* Populate GPU and GMU frequency vote table */
	ret = rpmh_arc_cmds(gmu, &gfx_arc, gfx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_cmds(gmu, &cx_arc, cx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_cmds(gmu, &mx_arc, mx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_votes_init(gmu, &gfx_arc, &mx_arc, GPU_ARC_VOTE);
	if (ret)
		return ret;

	return rpmh_arc_votes_init(gmu, &cx_arc, &mx_arc, GMU_ARC_VOTE);
}

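/*
 * gmu_irq_handler() - handler for the always-on GMU interrupt line. Clears
 * the pending status and reports watchdog bite, AHB bus error and fence
 * error interrupts; a watchdog bite also triggers GMU fault recovery.
 */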
static irqreturn_t gmu_irq_handler(int irq, void *data)
{
	struct gmu_device *gmu = data;
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int status = 0;

	adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS, &status);
	adreno_write_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, status);

	/* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD WAKEUP interrupts */
	if (status & GMU_INT_WDOG_BITE) {
		dev_err_ratelimited(&gmu->pdev->dev,
			"GMU watchdog expired interrupt received\n");
		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
		adreno_dispatcher_schedule(device);
	}
	if (status & GMU_INT_HOST_AHB_BUS_ERR)
		dev_err_ratelimited(&gmu->pdev->dev,
			"AHB bus error interrupt received\n");
	if (status & GMU_INT_FENCE_ERR) {
		unsigned int fence_status;

		adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_AHB_FENCE_STATUS, &fence_status);
		dev_err_ratelimited(&gmu->pdev->dev,
			"FENCE error interrupt received %x\n", fence_status);
	}

	if (status & ~GMU_AO_INT_MASK)
		dev_err_ratelimited(&gmu->pdev->dev,
			"Unhandled GMU interrupts 0x%x\n",
			status & ~GMU_AO_INT_MASK);

	return IRQ_HANDLED;
}

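/*
 * hfi_irq_handler() - handler for the GMU-to-host (HFI) interrupt line.
 * Schedules the HFI tasklet for message queue interrupts and triggers
 * GMU fault recovery on a CM3 fault.
 */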
static irqreturn_t hfi_irq_handler(int irq, void *data)
{
	struct kgsl_hfi *hfi = data;
	struct gmu_device *gmu = container_of(hfi, struct gmu_device, hfi);
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int status = 0;

	adreno_read_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status);
	adreno_write_gmureg(ADRENO_DEVICE(device),
			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status);

	if (status & HFI_IRQ_MSGQ_MASK)
		tasklet_hi_schedule(&hfi->tasklet);
	if (status & HFI_IRQ_CM3_FAULT_MASK) {
		dev_err_ratelimited(&gmu->pdev->dev,
			"GMU CM3 fault interrupt received\n");
		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
		adreno_dispatcher_schedule(device);
	}
	if (status & ~HFI_IRQ_MASK)
		dev_err_ratelimited(&gmu->pdev->dev,
			"Unhandled HFI interrupts 0x%x\n",
			status & ~HFI_IRQ_MASK);

	return IRQ_HANDLED;
}

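/*
 * gmu_pwrlevel_probe() - read the 'qcom,gmu-pwrlevels' node from the device
 * tree and fill in the GMU frequency table and number of GMU power levels.
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */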
static int gmu_pwrlevel_probe(struct gmu_device *gmu, struct device_node *node)
{
	struct device_node *pwrlevel_node, *child;

	pwrlevel_node = of_find_node_by_name(node, "qcom,gmu-pwrlevels");

	if (pwrlevel_node == NULL) {
		dev_err(&gmu->pdev->dev, "Unable to find 'qcom,gmu-pwrlevels'\n");
		return -EINVAL;
	}

	gmu->num_gmupwrlevels = 0;

	for_each_child_of_node(pwrlevel_node, child) {
		unsigned int index;

		if (of_property_read_u32(child, "reg", &index))
			return -EINVAL;

		if (index >= MAX_CX_LEVELS) {
			dev_err(&gmu->pdev->dev, "gmu pwrlevel %d is out of range\n",
				index);
			continue;
		}

		if (index >= gmu->num_gmupwrlevels)
			gmu->num_gmupwrlevels = index + 1;

		if (of_property_read_u32(child, "qcom,gmu-freq",
				&gmu->gmu_freqs[index]))
			return -EINVAL;
	}

	return 0;
}

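/*
 * gmu_reg_probe() - map a named register region of the GMU platform device.
 * Maps either the GMU CSR block or the PDC block depending on @is_gmu.
 * @gmu: Pointer to GMU device
 * @name: Name of the memory resource in the device tree
 * @is_gmu: true for the GMU register region, false for the PDC region
 */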
static int gmu_reg_probe(struct gmu_device *gmu, const char *name, bool is_gmu)
{
	struct resource *res;

	res = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM, name);
	if (res == NULL) {
		dev_err(&gmu->pdev->dev,
			"platform_get_resource %s failed\n", name);
		return -EINVAL;
	}

	if (res->start == 0 || resource_size(res) == 0) {
		dev_err(&gmu->pdev->dev,
			"dev %d %s invalid register region\n",
			gmu->pdev->dev.id, name);
		return -EINVAL;
	}

	if (is_gmu) {
		gmu->reg_phys = res->start;
		gmu->reg_len = resource_size(res);
		gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
				resource_size(res));

		if (gmu->reg_virt == NULL) {
			dev_err(&gmu->pdev->dev, "GMU regs ioremap failed\n");
			return -ENODEV;
		}
	} else {
		gmu->pdc_reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
				resource_size(res));
		if (gmu->pdc_reg_virt == NULL) {
			dev_err(&gmu->pdev->dev, "PDC regs ioremap failed\n");
			return -ENODEV;
		}
	}

	return 0;
}

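/*
 * gmu_clocks_probe() - look up every clock listed in the GMU node's
 * "clock-names" property and store the handles in the GMU clock array.
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */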
static int gmu_clocks_probe(struct gmu_device *gmu, struct device_node *node)
{
	const char *cname;
	struct property *prop;
	struct clk *c;
	int i = 0;

	of_property_for_each_string(node, "clock-names", prop, cname) {
		c = devm_clk_get(&gmu->pdev->dev, cname);

		if (IS_ERR(c)) {
			dev_err(&gmu->pdev->dev,
				"dt: Couldn't get GMU clock: %s\n", cname);
			return PTR_ERR(c);
		}

		if (i >= MAX_GMU_CLKS) {
			dev_err(&gmu->pdev->dev,
				"dt: too many GMU clocks defined\n");
			return -EINVAL;
		}

		gmu->clks[i++] = c;
	}

	return 0;
}

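/*
 * gmu_gpu_bw_probe() - read the GPU bus scale table and register the GMU
 * as a bus client so it can vote for DDR bandwidth.
 * @gmu: Pointer to GMU device
 */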
static int gmu_gpu_bw_probe(struct gmu_device *gmu)
{
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct msm_bus_scale_pdata *bus_scale_table;

	bus_scale_table = msm_bus_cl_get_pdata(device->pdev);
	if (bus_scale_table == NULL) {
		dev_err(&gmu->pdev->dev, "dt: cannot get bus table\n");
		return -ENODEV;
	}

	gmu->num_bwlevels = bus_scale_table->num_usecases;
	gmu->pcl = msm_bus_scale_register_client(bus_scale_table);
	if (!gmu->pcl) {
		dev_err(&gmu->pdev->dev, "dt: cannot register bus client\n");
		return -ENODEV;
	}

	return 0;
}

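/*
 * gmu_cnoc_bw_probe() - read the CNOC bus scale table from the GMU node and
 * register a separate bus client for CNOC bandwidth votes.
 * @gmu: Pointer to GMU device
 */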
static int gmu_cnoc_bw_probe(struct gmu_device *gmu)
{
	struct msm_bus_scale_pdata *cnoc_table;

	cnoc_table = msm_bus_cl_get_pdata(gmu->pdev);
	if (cnoc_table == NULL) {
		dev_err(&gmu->pdev->dev, "dt: cannot get cnoc table\n");
		return -ENODEV;
	}

	gmu->num_cnocbwlevels = cnoc_table->num_usecases;
	gmu->ccl = msm_bus_scale_register_client(cnoc_table);
	if (!gmu->ccl) {
		dev_err(&gmu->pdev->dev, "dt: cannot register cnoc client\n");
		return -ENODEV;
	}

	return 0;
}

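/*
 * gmu_regulators_probe() - look up the CX and GX gdsc regulators named in
 * the GMU node's "regulator-names" property.
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */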
static int gmu_regulators_probe(struct gmu_device *gmu,
		struct device_node *node)
{
	const char *name;
	struct property *prop;
	struct device *dev = &gmu->pdev->dev;
	int ret = 0;

	of_property_for_each_string(node, "regulator-names", prop, name) {
		if (!strcmp(name, "vddcx")) {
			gmu->cx_gdsc = devm_regulator_get(dev, name);
			if (IS_ERR(gmu->cx_gdsc)) {
				ret = PTR_ERR(gmu->cx_gdsc);
				dev_err(dev, "dt: GMU couldn't get CX gdsc\n");
				gmu->cx_gdsc = NULL;
				return ret;
			}
		} else if (!strcmp(name, "vdd")) {
			gmu->gx_gdsc = devm_regulator_get(dev, name);
			if (IS_ERR(gmu->gx_gdsc)) {
				ret = PTR_ERR(gmu->gx_gdsc);
				dev_err(dev, "dt: GMU couldn't get GX gdsc\n");
				gmu->gx_gdsc = NULL;
				return ret;
			}
		} else {
			dev_err(dev, "dt: Unknown GMU regulator: %s\n", name);
			return -ENODEV;
		}
	}

	return 0;
}

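/*
 * gmu_irq_probe() - request the HFI and GMU interrupt lines of the GMU
 * platform device and install their handlers.
 * @gmu: Pointer to GMU device
 */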
static int gmu_irq_probe(struct gmu_device *gmu)
{
	int ret;
	struct kgsl_hfi *hfi = &gmu->hfi;

	hfi->hfi_interrupt_num = platform_get_irq_byname(gmu->pdev,
			"kgsl_hfi_irq");
	ret = devm_request_irq(&gmu->pdev->dev,
			hfi->hfi_interrupt_num,
			hfi_irq_handler, IRQF_TRIGGER_HIGH,
			"HFI", hfi);
	if (ret) {
		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
				hfi->hfi_interrupt_num, ret);
		return ret;
	}

	gmu->gmu_interrupt_num = platform_get_irq_byname(gmu->pdev,
			"kgsl_gmu_irq");
	ret = devm_request_irq(&gmu->pdev->dev,
			gmu->gmu_interrupt_num,
			gmu_irq_handler, IRQF_TRIGGER_HIGH,
			"GMU", gmu);
	if (ret)
		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
				gmu->gmu_interrupt_num, ret);

	return ret;
}

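/* Clear pending GMU/HFI interrupts, unmask them on the GMU and enable them
 * on the host.
 */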
static void gmu_irq_enable(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;

	/* Clear any pending IRQs before unmasking on GMU */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
			0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
			0xFFFFFFFF);

	/* Unmask needed IRQs on GMU */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
			(unsigned int) ~HFI_IRQ_MASK);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
			(unsigned int) ~GMU_AO_INT_MASK);

	/* Enable all IRQs on host */
	enable_irq(hfi->hfi_interrupt_num);
	enable_irq(gmu->gmu_interrupt_num);
}

static void gmu_irq_disable(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;

	/* Disable all IRQs on host */
	disable_irq(gmu->gmu_interrupt_num);
	disable_irq(hfi->hfi_interrupt_num);

	/* Mask all IRQs on GMU */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
			0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
			0xFFFFFFFF);

	/* Clear any pending IRQs before disabling */
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
			0xFFFFFFFF);
	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
			0xFFFFFFFF);
}

/* Do not access any GMU registers in GMU probe function */
int gmu_probe(struct kgsl_device *device)
{
	struct device_node *node;
	struct gmu_device *gmu = &device->gmu;
	struct gmu_memdesc *mem_addr = NULL;
	struct kgsl_hfi *hfi = &gmu->hfi;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i = 0, ret = -ENXIO;

	node = of_find_compatible_node(device->pdev->dev.of_node,
			NULL, "qcom,gpu-gmu");

	if (node == NULL)
		return ret;

	device->gmu.pdev = of_find_device_by_node(node);

	/* Set up GMU regulators */
	ret = gmu_regulators_probe(gmu, node);
	if (ret)
		goto error;

	/* Set up GMU clocks */
	ret = gmu_clocks_probe(gmu, node);
	if (ret)
		goto error;

	/* Set up GMU IOMMU and shared memory with GMU */
	ret = gmu_memory_probe(&device->gmu, node);
	if (ret)
		goto error;
	mem_addr = gmu->hfi_mem;

	/* Map and reserve GMU CSRs registers */
	ret = gmu_reg_probe(gmu, "kgsl_gmu_reg", true);
	if (ret)
		goto error;

	ret = gmu_reg_probe(gmu, "kgsl_gmu_pdc_reg", false);
	if (ret)
		goto error;

	gmu->gmu2gpu_offset = (gmu->reg_phys - device->reg_phys) >> 2;

	/* Initialize HFI and GMU interrupts */
	ret = gmu_irq_probe(gmu);
	if (ret)
		goto error;

	/* Don't enable GMU interrupts until GMU started */
	/* We cannot use gmu_irq_disable because it writes registers */
	disable_irq(gmu->gmu_interrupt_num);
	disable_irq(hfi->hfi_interrupt_num);

	tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long)gmu);
	INIT_LIST_HEAD(&hfi->msglist);
	spin_lock_init(&hfi->msglock);

	/* Retrieve GMU/GPU power level configurations */
	ret = gmu_pwrlevel_probe(gmu, node);
	if (ret)
		goto error;

	gmu->num_gpupwrlevels = pwr->num_pwrlevels;

	for (i = 0; i < gmu->num_gpupwrlevels; i++) {
		int j = gmu->num_gpupwrlevels - 1 - i;

		gmu->gpu_freqs[i] = pwr->pwrlevels[j].gpu_freq;
	}

	/* Initialize GPU b/w levels configuration */
	ret = gmu_gpu_bw_probe(gmu);
	if (ret)
		goto error;

	/* Initialize GMU CNOC b/w levels configuration */
	ret = gmu_cnoc_bw_probe(gmu);
	if (ret)
		goto error;

	/* Populate RPMh configurations */
	ret = gmu_rpmh_init(gmu, pwr);
	if (ret)
		goto error;

	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);

	/* Set up GMU idle states */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT))
		gmu->idle_level = GPU_HW_MIN_VOLT;
	else if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP))
		gmu->idle_level = GPU_HW_NAP;
	else if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
		gmu->idle_level = GPU_HW_IFPC;
	else if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
		gmu->idle_level = GPU_HW_SPTP_PC;
	else
		gmu->idle_level = GPU_HW_ACTIVE;

	/* Disable LM during boot time */
	clear_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag);
	return 0;

error:
	gmu_remove(device);
	return ret;
}

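/*
 * gmu_enable_clks() - set the default GMU clock frequency, enable all GMU
 * clocks and mark the GMU clocks as on.
 * @gmu: Pointer to GMU device
 */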
static int gmu_enable_clks(struct gmu_device *gmu)
{
	int ret, j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
		return -EINVAL;

	ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
	if (ret) {
		dev_err(&gmu->pdev->dev, "fail to set default GMU clk freq %d\n",
				gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
		return ret;
	}

	while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
		ret = clk_prepare_enable(gmu->clks[j]);
		if (ret) {
			dev_err(&gmu->pdev->dev,
				"fail to enable gpucc clk idx %d\n",
				j);
			return ret;
		}
		j++;
	}

	set_bit(GMU_CLK_ON, &gmu->flags);
	return 0;
}

static int gmu_disable_clks(struct gmu_device *gmu)
{
	int j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
		return 0;

	while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
		clk_disable_unprepare(gmu->clks[j]);
		j++;
	}

	clear_bit(GMU_CLK_ON, &gmu->flags);
	return 0;
}

static int gmu_enable_gdsc(struct gmu_device *gmu)
{
	int ret;

	if (IS_ERR_OR_NULL(gmu->cx_gdsc))
		return 0;

	ret = regulator_enable(gmu->cx_gdsc);
	if (ret)
		dev_err(&gmu->pdev->dev,
			"Failed to enable GMU CX gdsc, error %d\n", ret);

	return ret;
}

#define CX_GDSC_TIMEOUT	5000	/* ms */
static int gmu_disable_gdsc(struct gmu_device *gmu)
{
	int ret;
	unsigned long t;

	if (IS_ERR_OR_NULL(gmu->cx_gdsc))
		return 0;

	ret = regulator_disable(gmu->cx_gdsc);
	if (ret) {
		dev_err(&gmu->pdev->dev,
			"Failed to disable GMU CX gdsc, error %d\n", ret);
		return ret;
	}

	/*
	 * After the GX GDSC is turned off, the CX GDSC must also turn off.
	 * Voting it off from the GPU driver alone cannot guarantee that, so
	 * poll with a 5s timeout to make sure.
	 */
	t = jiffies + msecs_to_jiffies(CX_GDSC_TIMEOUT);
	do {
		if (!regulator_is_enabled(gmu->cx_gdsc))
			return 0;
		cond_resched();
	} while (!(time_after(jiffies, t)));

	dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout");
	return -ETIMEDOUT;
}

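/*
 * gmu_suspend() - force the GMU into suspend: abandon pending HFI messages,
 * disable GMU interrupts, ask the firmware to suspend and turn off the GMU
 * clocks and the CX gdsc.
 * @device: Pointer to the KGSL device that owns the GMU
 */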
static int gmu_suspend(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct gmu_device *gmu = &device->gmu;

	if (!test_bit(GMU_CLK_ON, &gmu->flags))
		return 0;

	/* Pending messages in all queues are abandoned */
	hfi_stop(gmu);
	clear_bit(GMU_HFI_ON, &gmu->flags);
	gmu_irq_disable(device);

	if (gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_SUSPEND, 0, 0))
		return -EINVAL;

	gmu_disable_clks(gmu);
	gmu_disable_gdsc(gmu);
	dev_err(&gmu->pdev->dev, "Suspended GMU\n");
	return 0;
}

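/*
 * gmu_snapshot() - on the first GMU fault, trigger a GMU NMI and capture a
 * device snapshot; the GMU fault counter is incremented on every call.
 * @device: Pointer to the KGSL device that owns the GMU
 */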
static void gmu_snapshot(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gmu_device *gmu = &device->gmu;

	if (!gmu->fault_count) {
		/* Mask so there's no interrupt caused by NMI */
		adreno_write_gmureg(adreno_dev,
				ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);

		/* Make sure the interrupt is masked before causing it */
		wmb();
		adreno_write_gmureg(adreno_dev,
				ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
		adreno_write_gmureg(adreno_dev,
				ADRENO_REG_GMU_CM3_CFG, (1 << 9));

		/* Wait for the NMI to be handled */
		wmb();
		udelay(100);
		kgsl_device_snapshot(device, NULL, true);

		adreno_write_gmureg(adreno_dev,
				ADRENO_REG_GMU_GMU2HOST_INTR_CLR, 0xFFFFFFFF);
		adreno_write_gmureg(adreno_dev,
				ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
				(unsigned int) ~HFI_IRQ_MASK);
	}

	gmu->fault_count++;
}

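/*
 * gmu_change_gpu_pwrlevel() - move the GPU to a new power level after
 * applying thermal, max/min and power constraints, and request the
 * matching clock rate.
 * @device: Pointer to the KGSL device
 * @new_level: Requested GPU power level index
 */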
static void gmu_change_gpu_pwrlevel(struct kgsl_device *device,
		unsigned int new_level)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	unsigned int old_level = pwr->active_pwrlevel;

	/*
	 * Update the level according to any thermal,
	 * max/min, or power constraints.
	 */
	new_level = kgsl_pwrctrl_adjust_pwrlevel(device, new_level);

	/*
	 * If thermal cycling is required and the new level hits the
	 * thermal limit, kick off the cycling.
	 */
	kgsl_pwrctrl_set_thermal_cycle(device, new_level);

	pwr->active_pwrlevel = new_level;
	pwr->previous_pwrlevel = old_level;

	/* Request adjusted DCVS level */
	kgsl_clk_set_rate(device, pwr->active_pwrlevel);
}

/* To be called to power on both GPU and GMU */
int gmu_start(struct kgsl_device *device)
{
	int ret = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;

	switch (device->state) {
	case KGSL_STATE_INIT:
	case KGSL_STATE_SUSPEND:
		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
		gmu_enable_gdsc(gmu);
		gmu_enable_clks(gmu);
		gmu_irq_enable(device);

		/* Vote for 300MHz DDR for GMU to init */
		ret = msm_bus_scale_client_update_request(gmu->pcl,
				pwr->pwrlevels[pwr->default_pwrlevel].bus_freq);
		if (ret)
			dev_err(&gmu->pdev->dev,
				"Failed to allocate gmu b/w: %d\n", ret);

		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
				GMU_COLD_BOOT, 0);
		if (ret)
			goto error_gmu;

		ret = hfi_start(gmu, GMU_COLD_BOOT);
		if (ret)
			goto error_gmu;

		/* Request default DCVS level */
		gmu_change_gpu_pwrlevel(device, pwr->default_pwrlevel);
		msm_bus_scale_client_update_request(gmu->pcl, 0);
		break;

	case KGSL_STATE_SLUMBER:
		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
		gmu_enable_gdsc(gmu);
		gmu_enable_clks(gmu);
		gmu_irq_enable(device);

		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
				GMU_WARM_BOOT, 0);
		if (ret)
			goto error_gmu;

		ret = hfi_start(gmu, GMU_WARM_BOOT);
		if (ret)
			goto error_gmu;

		gmu_change_gpu_pwrlevel(device, pwr->default_pwrlevel);
		break;

	case KGSL_STATE_RESET:
		if (test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv) ||
				test_bit(GMU_FAULT, &gmu->flags)) {
			gmu_suspend(device);
			gmu_enable_gdsc(gmu);
			gmu_enable_clks(gmu);
			gmu_irq_enable(device);

			ret = gpudev->rpmh_gpu_pwrctrl(
					adreno_dev, GMU_FW_START, GMU_RESET, 0);
			if (ret)
				goto error_gmu;

			ret = hfi_start(gmu, GMU_COLD_BOOT);
			if (ret)
				goto error_gmu;

			/* Send DCVS level prior to reset */
			gmu_change_gpu_pwrlevel(device,
					pwr->default_pwrlevel);
		} else {
			/* GMU fast boot */
			hfi_stop(gmu);

			ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
					GMU_RESET, 0);
			if (ret)
				goto error_gmu;

			ret = hfi_start(gmu, GMU_WARM_BOOT);
			if (ret)
				goto error_gmu;
		}
		break;
	default:
		break;
	}

	return ret;

error_gmu:
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
		gpudev->oob_clear(adreno_dev,
				OOB_BOOT_SLUMBER_CLEAR_MASK);
	gmu_snapshot(device);
	return ret;
}

/* Caller shall ensure GPU is ready for SLUMBER */
void gmu_stop(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int ret = 0;

	if (!test_bit(GMU_CLK_ON, &gmu->flags))
		return;

	/* Wait for the lowest idle level we requested */
	if (gpudev->wait_for_lowest_idle &&
			gpudev->wait_for_lowest_idle(adreno_dev))
		goto error;

	ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
	if (ret)
		goto error;

	if (gpudev->wait_for_gmu_idle &&
			gpudev->wait_for_gmu_idle(adreno_dev))
		goto error;

	/* Pending messages in all queues are abandoned */
	hfi_stop(gmu);
	clear_bit(GMU_HFI_ON, &gmu->flags);
	gmu_irq_disable(device);

	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
	gmu_disable_clks(gmu);
	gmu_disable_gdsc(gmu);

	msm_bus_scale_client_update_request(gmu->pcl, 0);
	return;

error:
	/*
	 * The power controller will change state to SLUMBER anyway.
	 * Set the GMU_FAULT flag to indicate to the power controller
	 * that hang recovery is needed to power on the GPU.
	 */
	set_bit(GMU_FAULT, &gmu->flags);
	dev_err(&gmu->pdev->dev, "Failed to stop GMU\n");
	gmu_snapshot(device);
}

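/*
 * gmu_remove() - tear down the GMU: stop it, free its IRQs, unregister bus
 * clients, unmap registers, release shared memory and put clocks and
 * regulators.
 * @device: Pointer to the KGSL device that owns the GMU
 */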
void gmu_remove(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;
	int i = 0;

	if (!device->gmu.pdev)
		return;

	tasklet_kill(&hfi->tasklet);

	gmu_stop(device);
	gmu_irq_disable(device);

	while ((i < MAX_GMU_CLKS) && gmu->clks[i]) {
		gmu->clks[i] = NULL;
		i++;
	}

	if (gmu->gmu_interrupt_num) {
		devm_free_irq(&gmu->pdev->dev,
				gmu->gmu_interrupt_num, gmu);
		gmu->gmu_interrupt_num = 0;
	}

	if (hfi->hfi_interrupt_num) {
		devm_free_irq(&gmu->pdev->dev,
				hfi->hfi_interrupt_num, hfi);
		hfi->hfi_interrupt_num = 0;
	}

	if (gmu->ccl) {
		msm_bus_scale_unregister_client(gmu->ccl);
		gmu->ccl = 0;
	}

	if (gmu->pcl) {
		msm_bus_scale_unregister_client(gmu->pcl);
		gmu->pcl = 0;
	}

	if (gmu->pdc_reg_virt) {
		devm_iounmap(&gmu->pdev->dev, gmu->pdc_reg_virt);
		gmu->pdc_reg_virt = NULL;
	}

	if (gmu->reg_virt) {
		devm_iounmap(&gmu->pdev->dev, gmu->reg_virt);
		gmu->reg_virt = NULL;
	}

	if (gmu->hfi_mem || gmu->dump_mem)
		gmu_memory_close(&device->gmu);

	for (i = 0; i < MAX_GMU_CLKS; i++) {
		if (gmu->clks[i]) {
			devm_clk_put(&gmu->pdev->dev, gmu->clks[i]);
			gmu->clks[i] = NULL;
		}
	}

	if (gmu->gx_gdsc) {
		devm_regulator_put(gmu->gx_gdsc);
		gmu->gx_gdsc = NULL;
	}

	if (gmu->cx_gdsc) {
		devm_regulator_put(gmu->cx_gdsc);
		gmu->cx_gdsc = NULL;
	}

	device->gmu.pdev = NULL;
}