/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of_platform.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>

#include "kgsl_device.h"
#include "kgsl_gmu.h"
#include "kgsl_hfi.h"
#include "a6xx_reg.h"
#include "adreno.h"

#define GMU_CONTEXT_USER	0
#define GMU_CONTEXT_KERNEL	1
#define GMU_KERNEL_ENTRIES	8

enum gmu_iommu_mem_type {
	GMU_CACHED_CODE,
	GMU_CACHED_DATA,
	GMU_NONCACHED_KERNEL,
	GMU_NONCACHED_USER
};

/*
 * GMU virtual memory mapping definitions
 */
struct gmu_vma {
	unsigned int noncached_ustart;
	unsigned int noncached_usize;
	unsigned int noncached_kstart;
	unsigned int noncached_ksize;
	unsigned int cached_dstart;
	unsigned int cached_dsize;
	unsigned int cached_cstart;
	unsigned int cached_csize;
	unsigned int image_start;
};

struct gmu_iommu_context {
	const char *name;
	struct device *dev;
	struct iommu_domain *domain;
};

#define HFIMEM_SIZE SZ_16K

#define DUMPMEM_SIZE SZ_16K

/* Define target specific GMU VMA configurations */
static const struct gmu_vma vma = {
	/* Noncached user segment */
	0x80000000, SZ_1G,
	/* Noncached kernel segment */
	0x60000000, SZ_512M,
	/* Cached data segment */
	0x44000, (SZ_256K-SZ_16K),
	/* Cached code segment */
	0x0, (SZ_256K-SZ_16K),
	/* FW image */
	0x0,
};

struct gmu_iommu_context gmu_ctx[] = {
	[GMU_CONTEXT_USER] = { .name = "gmu_user" },
	[GMU_CONTEXT_KERNEL] = { .name = "gmu_kernel" }
};

/*
 * There are a few static memory buffers that are allocated and mapped at boot
 * time for GMU to function. The buffers are permanent (not freed) after
 * GPU boot. The sizes of the buffers are constant and not expected to change.
 *
 * We define an array and a simple allocator to keep track of the currently
 * active SMMU entries of the GMU kernel mode context. Each entry is assigned
 * a unique address inside the GMU kernel mode address range. The addresses
 * are assigned sequentially and aligned to 1MB each.
 */
static struct gmu_memdesc gmu_kmem_entries[GMU_KERNEL_ENTRIES];
static unsigned long gmu_kmem_bitmap;

/*
 * kgsl_gmu_isenabled() - Check if there is a GMU and it is enabled
 * @device: Pointer to the KGSL device that owns the GMU
 *
 * Check if a GMU has been found and successfully probed. Also
 * check that the feature flag to use a GMU is enabled. Returns
 * true if both of these conditions are met, otherwise false.
 */
bool kgsl_gmu_isenabled(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (gmu->pdev && ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
		return true;

	return false;
}

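/*
 * Common IOMMU fault handler: log the faulting GMU address, context bank
 * name, access direction and fault type.
 */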
static int _gmu_iommu_fault_handler(struct device *dev,
		unsigned long addr, int flags, const char *name)
{
	char *fault_type = "unknown";

	if (flags & IOMMU_FAULT_TRANSLATION)
		fault_type = "translation";
	else if (flags & IOMMU_FAULT_PERMISSION)
		fault_type = "permission";

	dev_err(dev, "GMU fault addr = %lX, context=%s (%s %s fault)\n",
			addr, name,
			(flags & IOMMU_FAULT_WRITE) ? "write" : "read",
			fault_type);

	return 0;
}

static int gmu_kernel_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token)
{
	return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_kernel");
}

static int gmu_user_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long addr, int flags, void *token)
{
	return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_user");
}

static void free_gmu_mem(struct gmu_device *gmu,
		struct gmu_memdesc *md)
{
	/* Free GMU image memory */
	if (md->hostptr)
		dma_free_attrs(&gmu->pdev->dev, (size_t) md->size,
				(void *)md->hostptr, md->physaddr, 0);
	memset(md, 0, sizeof(*md));
}

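/*
 * Allocate a DMA buffer for the memory descriptor and map it into the
 * IOMMU domain of the given GMU context bank at md->gmuaddr.
 */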
static int alloc_and_map(struct gmu_device *gmu, unsigned int ctx_id,
		struct gmu_memdesc *md, unsigned int attrs)
{
	int ret;
	struct iommu_domain *domain;

	domain = gmu_ctx[ctx_id].domain;

	md->hostptr = dma_alloc_attrs(&gmu->pdev->dev, (size_t) md->size,
			&md->physaddr, GFP_KERNEL, 0);

	if (md->hostptr == NULL)
		return -ENOMEM;

	ret = iommu_map(domain, md->gmuaddr,
			md->physaddr, md->size,
			attrs);

	if (ret) {
		dev_err(&gmu->pdev->dev,
			"gmu map err: gaddr=0x%016llX, paddr=0x%016llX\n",
			md->gmuaddr, md->physaddr);
		free_gmu_mem(gmu, md);
	}

	return ret;
}

/*
 * allocate_gmu_image() - allocate and map memory for the GMU FW image.
 * The size comes from the loaded firmware file. The firmware image size
 * shall be less than the code cache size, otherwise FW may experience
 * performance issues.
 * @gmu: Pointer to GMU device
 * @size: Requested allocation size
 */
int allocate_gmu_image(struct gmu_device *gmu, unsigned int size)
{
	struct gmu_memdesc *md = &gmu->fw_image;

	if (size > vma.cached_csize) {
		dev_err(&gmu->pdev->dev,
			"GMU firmware size too big: %d\n", size);
		return -EINVAL;
	}

	md->size = size;
	md->gmuaddr = vma.image_start;
	md->attr = GMU_CACHED_CODE;

	return alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, IOMMU_READ);
}

/*
 * allocate_gmu_kmem() - allocates and maps GMU kernel shared memory
 * @gmu: Pointer to GMU device
 * @size: Requested size
 * @attrs: IOMMU mapping attributes
 */
static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
		unsigned int size, unsigned int attrs)
{
	struct gmu_memdesc *md;
	int ret, entry_idx = find_first_zero_bit(
			&gmu_kmem_bitmap, GMU_KERNEL_ENTRIES);

	size = PAGE_ALIGN(size);

	if (size > SZ_1M || size == 0) {
		dev_err(&gmu->pdev->dev,
			"Requested %d bytes of GMU kernel memory, max=1MB\n",
			size);
		return ERR_PTR(-EINVAL);
	}

	if (entry_idx >= GMU_KERNEL_ENTRIES) {
		dev_err(&gmu->pdev->dev,
			"Ran out of GMU kernel mempool slots\n");
		return ERR_PTR(-EINVAL);
	}

	/* Allocate GMU virtual memory */
	md = &gmu_kmem_entries[entry_idx];
	md->gmuaddr = vma.noncached_kstart + (entry_idx * SZ_1M);
	set_bit(entry_idx, &gmu_kmem_bitmap);
	md->attr = GMU_NONCACHED_KERNEL;
	md->size = size;

	ret = alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, attrs);

	if (ret) {
		clear_bit(entry_idx, &gmu_kmem_bitmap);
		md->gmuaddr = 0;
		return ERR_PTR(ret);
	}

	return md;
}

static int gmu_iommu_cb_probe(struct gmu_device *gmu,
		struct gmu_iommu_context *ctx,
		struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);
	struct device *dev;
	int ret;

	dev = &pdev->dev;

	ctx->dev = dev;
	ctx->domain = iommu_domain_alloc(&platform_bus_type);
	if (ctx->domain == NULL) {
		dev_err(&gmu->pdev->dev, "gmu iommu fail to alloc %s domain\n",
			ctx->name);
		return -ENODEV;
	}

	ret = iommu_attach_device(ctx->domain, dev);
	if (ret) {
		dev_err(&gmu->pdev->dev, "gmu iommu fail to attach %s device\n",
			ctx->name);
		iommu_domain_free(ctx->domain);
	}

	return ret;
}

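/* SMMU context banks used by GMU: DT compatibles, index and fault handler */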
static struct {
	const char *compatible;
	int index;
	iommu_fault_handler_t hdlr;
} cbs[] = {
	{ "qcom,smmu-gmu-user-cb",
		GMU_CONTEXT_USER,
		gmu_user_fault_handler,
	},
	{ "qcom,smmu-gmu-kernel-cb",
		GMU_CONTEXT_KERNEL,
		gmu_kernel_fault_handler,
	},
};

/*
 * gmu_iommu_init() - probe IOMMU context banks used by GMU
 * and attach GMU device
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */
int gmu_iommu_init(struct gmu_device *gmu, struct device_node *node)
{
	struct device_node *child;
	struct gmu_iommu_context *ctx = NULL;
	int ret, i;

	of_platform_populate(node, NULL, NULL, &gmu->pdev->dev);

	for (i = 0; i < ARRAY_SIZE(cbs); i++) {
		child = of_find_compatible_node(node, NULL, cbs[i].compatible);
		if (child) {
			ctx = &gmu_ctx[cbs[i].index];
			ret = gmu_iommu_cb_probe(gmu, ctx, child);
			if (ret)
				return ret;
			iommu_set_fault_handler(ctx->domain,
					cbs[i].hdlr, ctx);
		}
	}

	for (i = 0; i < ARRAY_SIZE(gmu_ctx); i++) {
		if (gmu_ctx[i].domain == NULL) {
			dev_err(&gmu->pdev->dev,
				"Missing GMU %s context bank node\n",
				gmu_ctx[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * gmu_kmem_close() - free all kernel memory allocated for GMU and detach GMU
 * from IOMMU context banks.
 * @gmu: Pointer to GMU device
 */
void gmu_kmem_close(struct gmu_device *gmu)
{
	int i;
	struct gmu_memdesc *md = &gmu->fw_image;
	struct gmu_iommu_context *ctx = &gmu_ctx[GMU_CONTEXT_KERNEL];

	/* Free GMU image memory */
	free_gmu_mem(gmu, md);

	/* Unmap image memory */
	iommu_unmap(ctx->domain,
			gmu->fw_image.gmuaddr,
			gmu->fw_image.size);

	gmu->hfi_mem = NULL;
	gmu->dump_mem = NULL;

	/* Unmap all memories in GMU kernel memory pool */
	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
		struct gmu_memdesc *memptr = &gmu_kmem_entries[i];

		if (memptr->gmuaddr)
			iommu_unmap(ctx->domain, memptr->gmuaddr, memptr->size);
	}

	/* Free GMU shared kernel memory */
	for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
		md = &gmu_kmem_entries[i];
		free_gmu_mem(gmu, md);
		clear_bit(i, &gmu_kmem_bitmap);
	}

	/* Detach the device from SMMU context bank */
	iommu_detach_device(ctx->domain, ctx->dev);

	/* Free kernel mem context */
	iommu_domain_free(ctx->domain);
}

void gmu_memory_close(struct gmu_device *gmu)
{
	gmu_kmem_close(gmu);

	/* Free user memory context */
	iommu_domain_free(gmu_ctx[GMU_CONTEXT_USER].domain);
}

/*
 * gmu_memory_probe() - probe GMU IOMMU context banks and allocate memory
 * to share with GMU in kernel mode.
 * @gmu: Pointer to GMU device
 * @node: Pointer to GMU device node
 */
int gmu_memory_probe(struct gmu_device *gmu, struct device_node *node)
{
	int ret;

	ret = gmu_iommu_init(gmu, node);
	if (ret)
		return ret;

	/* Allocates & maps memory for HFI */
	gmu->hfi_mem = allocate_gmu_kmem(gmu, HFIMEM_SIZE,
			(IOMMU_READ | IOMMU_WRITE));
	if (IS_ERR(gmu->hfi_mem)) {
		ret = PTR_ERR(gmu->hfi_mem);
		goto err_ret;
	}

	/* Allocates & maps GMU crash dump memory */
	gmu->dump_mem = allocate_gmu_kmem(gmu, DUMPMEM_SIZE,
			(IOMMU_READ | IOMMU_WRITE));
	if (IS_ERR(gmu->dump_mem)) {
		ret = PTR_ERR(gmu->dump_mem);
		goto err_ret;
	}

	return 0;
err_ret:
	gmu_memory_close(gmu);
	return ret;
}

/*
 * gmu_dcvs_set() - request GMU to change GPU frequency and/or bandwidth.
 * @gmu: Pointer to GMU device
 * @gpu_pwrlevel: index to GPU DCVS table used by KGSL
 * @bus_level: index to GPU bus table used by KGSL
 *
 * The function converts the GPU power level and bus level indices used by
 * KGSL to the indices used by GMU/RPMh.
 */
int gmu_dcvs_set(struct gmu_device *gmu,
		unsigned int gpu_pwrlevel, unsigned int bus_level)
{
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	int perf_idx = INVALID_DCVS_IDX, bw_idx = INVALID_DCVS_IDX;

	if (gpu_pwrlevel < gmu->num_gpupwrlevels)
		perf_idx = gmu->num_gpupwrlevels - gpu_pwrlevel - 1;

	if (bus_level < gmu->num_bwlevels)
		bw_idx = bus_level;

	if ((perf_idx == INVALID_DCVS_IDX) &&
		(bw_idx == INVALID_DCVS_IDX))
		return -EINVAL;

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
		return gpudev->rpmh_gpu_pwrctrl(adreno_dev,
			GMU_DCVS_NOHFI, perf_idx, bw_idx);

	return hfi_send_dcvs_vote(gmu, perf_idx, bw_idx, ACK_NONBLOCK);
}

struct rpmh_arc_vals {
	unsigned int num;
	uint16_t val[MAX_GX_LEVELS];
};

static const char gfx_res_id[] = "gfx.lvl";
static const char cx_res_id[] = "cx.lvl";
static const char mx_res_id[] = "mx.lvl";

enum rpmh_vote_type {
	GPU_ARC_VOTE = 0,
	GMU_ARC_VOTE,
	INVALID_ARC_VOTE,
};

static const char debug_strs[][8] = {
	[GPU_ARC_VOTE] = "gpu",
	[GMU_ARC_VOTE] = "gmu",
};

/*
 * rpmh_arc_cmds() - query RPMh command database for GX/CX/MX rail
 * VLVL tables. The table index is used by GMU to vote for rail voltage.
 * @gmu: Pointer to GMU device
 * @arc: Pointer to RPMh rail controller (ARC) voltage table
 * @res_id: Pointer to 8 char array that contains rail name
 */
static int rpmh_arc_cmds(struct gmu_device *gmu,
		struct rpmh_arc_vals *arc, const char *res_id)
{
	unsigned int len;

	len = cmd_db_get_aux_data_len(res_id);

	if (len > (MAX_GX_LEVELS << 1)) {
		/* CmdDB VLVL table size in bytes is too large */
		dev_err(&gmu->pdev->dev,
			"gfx cmddb size %d larger than alloc buf %d of %s\n",
			len, (MAX_GX_LEVELS << 1), res_id);
		return -EINVAL;
	}

	cmd_db_get_aux_data(res_id, (uint8_t *)arc->val, len);
	arc->num = len >> 1;

	return 0;
}

/*
 * setup_volt_dependency_tbl() - set up GX->MX or CX->MX rail voltage
 * dependencies. The second rail voltage shall be equal to or higher than
 * the primary rail voltage. The VLVL table index is used by RPMh for PMIC
 * voltage setting.
 * @votes: Pointer to an ARC vote descriptor
 * @pri_rail: Pointer to primary power rail VLVL table
 * @sec_rail: Pointer to second/dependent power rail VLVL table
 * @vlvl: Pointer to VLVL table being used by GPU or GMU driver, a subset
 *	of pri_rail VLVL table
 * @num_entries: Valid number of entries in table pointed by "vlvl" parameter
 */
static int setup_volt_dependency_tbl(struct arc_vote_desc *votes,
		struct rpmh_arc_vals *pri_rail, struct rpmh_arc_vals *sec_rail,
		unsigned int *vlvl, unsigned int num_entries)
{
	int i, j, k;
	uint16_t cur_vlvl;

	/*
	 * i tracks current KGSL GPU frequency table entry
	 * j tracks second rail voltage table entry
	 * k tracks primary rail voltage table entry
	 */
	for (i = 0, k = 0; i < num_entries; k++) {
		if (pri_rail->val[k] != vlvl[i]) {
			if (k >= pri_rail->num)
				return -EINVAL;
			continue;
		}
		votes[i].pri_idx = k;
		votes[i].vlvl = vlvl[i];
		cur_vlvl = vlvl[i];

		/*
		 * Find the index of the first second rail vlvl array element
		 * whose vlvl is >= the current vlvl of the primary rail.
		 */
		for (j = 0; j < sec_rail->num; j++) {
			if (sec_rail->val[j] >= cur_vlvl) {
				votes[i].sec_idx = j;
				break;
			}
		}

		if (j == sec_rail->num)
			votes[i].sec_idx = j;

		i++;
	}
	return 0;
}

/*
 * rpmh_arc_votes_init() - initialize RPMh votes needed for rail voltage
 * scaling by GMU.
 * @gmu: Pointer to GMU device
 * @pri_rail: Pointer to primary power rail VLVL table
 * @sec_rail: Pointer to second/dependent power rail VLVL table
 * @type: the type of the primary rail, GPU or GMU
 */
static int rpmh_arc_votes_init(struct gmu_device *gmu,
		struct rpmh_arc_vals *pri_rail,
		struct rpmh_arc_vals *sec_rail,
		unsigned int type)
{
	unsigned int num_freqs;
	struct arc_vote_desc *votes;
	unsigned int vlvl_tbl[MAX_GX_LEVELS];
	unsigned int *freq_tbl;
	int i, ret;
	/*
	 * FIXME: remove below two arrays after OPP VLVL query API ready
	 * struct dev_pm_opp *opp;
	 */
	uint16_t gpu_vlvl[] = {0, 128, 256, 384};
	uint16_t cx_vlvl[] = {0, 48, 256};

	if (type == GPU_ARC_VOTE) {
		num_freqs = gmu->num_gpupwrlevels;
		votes = gmu->rpmh_votes.gx_votes;
		freq_tbl = gmu->gpu_freqs;
	} else if (type == GMU_ARC_VOTE) {
		num_freqs = gmu->num_gmupwrlevels;
		votes = gmu->rpmh_votes.cx_votes;
		freq_tbl = gmu->gmu_freqs;
	} else {
		return -EINVAL;
	}

	if (num_freqs > pri_rail->num) {
		dev_err(&gmu->pdev->dev,
			"%s defined more DCVS levels than RPMh can support\n",
			debug_strs[type]);
		return -EINVAL;
	}

	/*
	 * FIXME: Find a core's voltage VLVL value based on its frequency
	 * using OPP framework, waiting for David Colin, ETA Jan.
	 */
	for (i = 0; i < num_freqs; i++) {
		/*
		 * opp = dev_pm_opp_find_freq_exact(&gmu->pdev->dev,
		 *		freq_tbl[i], true);
		 * if (IS_ERR(opp)) {
		 *	dev_err(&gmu->pdev->dev,
		 *		"Failed to find opp freq %d of %s\n",
		 *		freq_tbl[i], debug_strs[type]);
		 *	return PTR_ERR(opp);
		 * }
		 * vlvl_tbl[i] = dev_pm_opp_get_voltage(opp);
		 */
		if (type == GPU_ARC_VOTE)
			vlvl_tbl[i] = gpu_vlvl[i];
		else
			vlvl_tbl[i] = cx_vlvl[i];
	}

	ret = setup_volt_dependency_tbl(votes,
			pri_rail, sec_rail, vlvl_tbl, num_freqs);

	if (ret)
		dev_err(&gmu->pdev->dev, "%s rail volt failed to match DT freqs\n",
			debug_strs[type]);

	return ret;
}

/*
 * build_rpmh_bw_votes() - build TCS commands to vote for bandwidth.
 * Each command sets frequency of a node along path to DDR or CNOC.
 * @rpmh_vote: Pointer to RPMh vote needed by GMU to set BW via RPMh
 * @num_usecases: Number of BW use cases (or BW levels)
 * @handle: Provided by bus driver. It contains TCS command sets for
 * all BW use cases of a bus client.
 */
static void build_rpmh_bw_votes(struct gmu_bw_votes *rpmh_vote,
		unsigned int num_usecases, struct msm_bus_tcs_handle handle)
{
	struct msm_bus_tcs_usecase *tmp;
	int i, j;

	for (i = 0; i < num_usecases; i++) {
		tmp = &handle.usecases[i];
		for (j = 0; j < tmp->num_cmds; j++) {
			if (!i) {
				/*
				 * Wait bitmask and TCS command addresses are
				 * same for all bw use cases. To save data
				 * volume exchanged between driver and GMU,
				 * only transfer bitmasks and TCS command
				 * addresses of the first bw use case.
				 */
				rpmh_vote->cmds_per_bw_vote = tmp->num_cmds;
				rpmh_vote->cmds_wait_bitmask =
						tmp->cmds[j].complete ?
						rpmh_vote->cmds_wait_bitmask
						| BIT(j)
						: rpmh_vote->cmds_wait_bitmask
						& (~BIT(j));
				rpmh_vote->cmd_addrs[j] = tmp->cmds[j].addr;
			}
			rpmh_vote->cmd_data[i][j] = tmp->cmds[j].data;
		}
	}
}

/*
 * gmu_bus_vote_init - initialize RPMh votes needed for bw scaling by GMU.
 * @gmu: Pointer to GMU device
 * @pwr: Pointer to KGSL power controller
 */
static int gmu_bus_vote_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
{
	struct msm_bus_tcs_usecase *usecases;
	struct msm_bus_tcs_handle hdl;
	struct rpmh_votes_t *votes = &gmu->rpmh_votes;
	int ret;

	usecases = kcalloc(gmu->num_bwlevels, sizeof(*usecases), GFP_KERNEL);
	if (!usecases)
		return -ENOMEM;

	hdl.num_usecases = gmu->num_bwlevels;
	hdl.usecases = usecases;

	/*
	 * Query TCS command set for each use case defined in GPU b/w table
	 */
	ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->pcl);
	if (ret)
		goto out;

	build_rpmh_bw_votes(&votes->ddr_votes, gmu->num_bwlevels, hdl);

	/*
	 * Query CNOC TCS command set for each use case defined in CNOC bw table
	 */
	ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->ccl);
	if (ret)
		goto out;

	build_rpmh_bw_votes(&votes->cnoc_votes, gmu->num_cnocbwlevels, hdl);

out:
	kfree(usecases);

	return ret;
}

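/*
 * gmu_rpmh_init() - build all RPMh votes needed by GMU for DCVS: bus
 * bandwidth votes and GX/CX rail ARC votes.
 * @gmu: Pointer to GMU device
 * @pwr: Pointer to KGSL power controller
 */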
int gmu_rpmh_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
{
	struct rpmh_arc_vals gfx_arc, cx_arc, mx_arc;
	int ret;

	/* Populate BW vote table */
	ret = gmu_bus_vote_init(gmu, pwr);
	if (ret)
		return ret;

	/* Populate GPU and GMU frequency vote table */
	ret = rpmh_arc_cmds(gmu, &gfx_arc, gfx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_cmds(gmu, &cx_arc, cx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_cmds(gmu, &mx_arc, mx_res_id);
	if (ret)
		return ret;

	ret = rpmh_arc_votes_init(gmu, &gfx_arc, &mx_arc, GPU_ARC_VOTE);
	if (ret)
		return ret;

	return rpmh_arc_votes_init(gmu, &cx_arc, &mx_arc, GMU_ARC_VOTE);
}

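/*
 * gmu_irq_handler() - shared handler for the GMU and HFI interrupt lines.
 * GMU error interrupts are logged here; HFI message interrupts schedule
 * the HFI tasklet.
 */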
static irqreturn_t gmu_irq_handler(int irq, void *data)
{
	struct gmu_device *gmu = data;
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct kgsl_hfi *hfi = &gmu->hfi;
	unsigned int status = 0;

	if (irq == gmu->gmu_interrupt_num) {
		adreno_read_gmureg(ADRENO_DEVICE(device),
				ADRENO_REG_GMU_HOST_INTERRUPT_STATUS,
				&status);

		/* Ignore GMU_INT_RSCC_COMP interrupts */
		if (status & GMU_INT_WDOG_BITE)
			dev_err_ratelimited(&gmu->pdev->dev,
					"GMU watchdog expired interrupt\n");
		if (status & GMU_INT_DBD_WAKEUP)
			dev_err_ratelimited(&gmu->pdev->dev,
					"GMU doorbell interrupt received\n");
		if (status & GMU_INT_HOST_AHB_BUS_ERR)
			dev_err_ratelimited(&gmu->pdev->dev,
					"AHB bus error interrupt received\n");

		adreno_write_gmureg(ADRENO_DEVICE(device),
				ADRENO_REG_GMU_HOST_INTERRUPT_CLR,
				status);
	} else {
		adreno_read_gmureg(ADRENO_DEVICE(device),
				ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
				&status);
		adreno_write_gmureg(ADRENO_DEVICE(device),
				ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
				status);

		if (status & HFI_IRQ_MASK) {
			if (status & HFI_IRQ_MSGQ_MASK)
				tasklet_hi_schedule(&hfi->tasklet);
		} else
			dev_err_ratelimited(&gmu->pdev->dev,
					"Unhandled GMU interrupts %x\n",
					status);
	}

	return IRQ_HANDLED;
}

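/* Read GMU power levels and frequencies from the "qcom,gmu-pwrlevels" DT node */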
static int gmu_pwrlevel_probe(struct gmu_device *gmu, struct device_node *node)
{
	struct device_node *pwrlevel_node, *child;

	pwrlevel_node = of_find_node_by_name(node, "qcom,gmu-pwrlevels");

	if (pwrlevel_node == NULL) {
		dev_err(&gmu->pdev->dev, "Unable to find 'qcom,gmu-pwrlevels'\n");
		return -EINVAL;
	}

	gmu->num_gmupwrlevels = 0;

	for_each_child_of_node(pwrlevel_node, child) {
		unsigned int index;

		if (of_property_read_u32(child, "reg", &index))
			return -EINVAL;

		if (index >= MAX_CX_LEVELS) {
			dev_err(&gmu->pdev->dev, "gmu pwrlevel %d is out of range\n",
				index);
			continue;
		}

		if (index >= gmu->num_gmupwrlevels)
			gmu->num_gmupwrlevels = index + 1;

		if (of_property_read_u32(child, "qcom,gmu-freq",
				&gmu->gmu_freqs[index]))
			return -EINVAL;
	}

	return 0;
}

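/*
 * Map a GMU register region by name. The GMU register block is also
 * reserved; the PDC block is only ioremapped.
 */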
static int gmu_reg_probe(struct gmu_device *gmu, const char *name, bool is_gmu)
{
	struct resource *res;

	res = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM, name);
	if (res == NULL) {
		dev_err(&gmu->pdev->dev,
			"platform_get_resource %s failed\n", name);
		return -EINVAL;
	}

	if (res->start == 0 || resource_size(res) == 0) {
		dev_err(&gmu->pdev->dev,
			"dev %d %s invalid register region\n",
			gmu->pdev->dev.id, name);
		return -EINVAL;
	}

	if (is_gmu) {
		if (!devm_request_mem_region(&gmu->pdev->dev, res->start,
				resource_size(res),
				res->name)) {
			dev_err(&gmu->pdev->dev,
				"GMU regs request mem region failed\n");
			return -ENOMEM;
		}

		gmu->reg_phys = res->start;
		gmu->reg_len = resource_size(res);
		gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
				resource_size(res));

		if (gmu->reg_virt == NULL) {
			dev_err(&gmu->pdev->dev, "GMU regs ioremap failed\n");
			return -ENODEV;
		}

	} else {
		gmu->pdc_reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
				resource_size(res));
		if (gmu->pdc_reg_virt == NULL) {
			dev_err(&gmu->pdev->dev, "PDC regs ioremap failed\n");
			return -ENODEV;
		}
	}

	return 0;
}

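/* Look up all GMU clocks listed in the "clock-names" device tree property */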
static int gmu_clocks_probe(struct gmu_device *gmu, struct device_node *node)
{
	const char *cname;
	struct property *prop;
	struct clk *c;
	int i = 0;

	of_property_for_each_string(node, "clock-names", prop, cname) {
		c = devm_clk_get(&gmu->pdev->dev, cname);

		if (IS_ERR(c)) {
			dev_err(&gmu->pdev->dev,
				"dt: Couldn't get GMU clock: %s\n", cname);
			return PTR_ERR(c);
		}

		if (i >= MAX_GMU_CLKS) {
			dev_err(&gmu->pdev->dev,
				"dt: too many GMU clocks defined\n");
			return -EINVAL;
		}

		gmu->clks[i++] = c;
	}

	return 0;
}

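/* Register a bus scale client for the GPU DDR path and record its BW levels */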
static int gmu_gpu_bw_probe(struct gmu_device *gmu)
{
	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
	struct msm_bus_scale_pdata *bus_scale_table;

	bus_scale_table = msm_bus_cl_get_pdata(device->pdev);
	if (bus_scale_table == NULL) {
		dev_err(&gmu->pdev->dev, "dt: cannot get bus table\n");
		return -ENODEV;
	}

	gmu->num_bwlevels = bus_scale_table->num_usecases;
	gmu->pcl = msm_bus_scale_register_client(bus_scale_table);
	if (!gmu->pcl) {
		dev_err(&gmu->pdev->dev, "dt: cannot register bus client\n");
		return -ENODEV;
	}

	return 0;
}

static int gmu_cnoc_bw_probe(struct gmu_device *gmu)
{
	struct msm_bus_scale_pdata *cnoc_table;

	cnoc_table = msm_bus_cl_get_pdata(gmu->pdev);
	if (cnoc_table == NULL) {
		dev_err(&gmu->pdev->dev, "dt: cannot get cnoc table\n");
		return -ENODEV;
	}

	gmu->num_cnocbwlevels = cnoc_table->num_usecases;
	gmu->ccl = msm_bus_scale_register_client(cnoc_table);
	if (!gmu->ccl) {
		dev_err(&gmu->pdev->dev, "dt: cannot register cnoc client\n");
		return -ENODEV;
	}

	return 0;
}

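/* Get handles to the CX and GX gdsc regulators listed in device tree */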
946
947static int gmu_regulators_probe(struct gmu_device *gmu,
948 struct device_node *node)
949{
950 const char *name;
951 struct property *prop;
952 struct device *dev = &gmu->pdev->dev;
953 int ret = 0;
954
955 of_property_for_each_string(node, "regulator-names", prop, name) {
956 if (!strcmp(name, "vddcx")) {
957 gmu->cx_gdsc = devm_regulator_get(dev, name);
958 if (IS_ERR(gmu->cx_gdsc)) {
959 ret = PTR_ERR(gmu->cx_gdsc);
960 dev_err(dev, "dt: GMU couldn't get CX gdsc\n");
961 gmu->cx_gdsc = NULL;
962 return ret;
963 }
964 } else if (!strcmp(name, "vdd")) {
965 gmu->gx_gdsc = devm_regulator_get(dev, name);
966 if (IS_ERR(gmu->gx_gdsc)) {
967 ret = PTR_ERR(gmu->gx_gdsc);
968 dev_err(dev, "dt: GMU couldn't get GX gdsc\n");
969 gmu->gx_gdsc = NULL;
970 return ret;
971 }
972 } else {
973 dev_err(dev, "dt: Unknown GMU regulator: %s\n", name);
974 return -ENODEV;
975 }
976 }
977
978 return 0;
979}
980
/* Do not access any GMU registers in GMU probe function */
int gmu_probe(struct kgsl_device *device)
{
	struct device_node *node;
	struct gmu_device *gmu = &device->gmu;
	struct gmu_memdesc *mem_addr = NULL;
	struct kgsl_hfi *hfi = &gmu->hfi;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int i = 0, ret = -ENXIO;

	node = of_find_compatible_node(device->pdev->dev.of_node,
			NULL, "qcom,gpu-gmu");

	if (node == NULL)
		return ret;

	device->gmu.pdev = of_find_device_by_node(node);

	/* Set up GMU regulators */
	ret = gmu_regulators_probe(gmu, node);
	if (ret)
		goto error;

	/* Set up GMU clocks */
	ret = gmu_clocks_probe(gmu, node);
	if (ret)
		goto error;

	/* Set up GMU IOMMU and shared memory with GMU */
	ret = gmu_memory_probe(&device->gmu, node);
	if (ret)
		goto error;
	mem_addr = gmu->hfi_mem;

	/* Map and reserve GMU CSRs registers */
	ret = gmu_reg_probe(gmu, "kgsl_gmu_reg", true);
	if (ret)
		goto error;

	ret = gmu_reg_probe(gmu, "kgsl_gmu_pdc_reg", false);
	if (ret)
		goto error;

	gmu->gmu2gpu_offset = (gmu->reg_phys - device->reg_phys) >> 2;

	/* Initialize HFI and GMU interrupts */
	hfi->hfi_interrupt_num = platform_get_irq_byname(gmu->pdev,
			"kgsl_hfi_irq");
	ret = devm_request_irq(&gmu->pdev->dev,
			hfi->hfi_interrupt_num,
			gmu_irq_handler, IRQF_TRIGGER_HIGH,
			"GMU", gmu);
	if (ret) {
		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
				hfi->hfi_interrupt_num, ret);
		goto error;
	}

	gmu->gmu_interrupt_num = platform_get_irq_byname(gmu->pdev,
			"kgsl_gmu_irq");
	ret = devm_request_irq(&gmu->pdev->dev,
			gmu->gmu_interrupt_num,
			gmu_irq_handler, IRQF_TRIGGER_HIGH,
			"GMU", gmu);
	if (ret) {
		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
				gmu->gmu_interrupt_num, ret);
		goto error;
	}

	/* Don't enable GMU interrupts until GMU started */
	disable_irq(gmu->gmu_interrupt_num);
	disable_irq(hfi->hfi_interrupt_num);

	tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long)gmu);
	INIT_LIST_HEAD(&hfi->msglist);
	spin_lock_init(&hfi->msglock);

	/* Retrieve GMU/GPU power level configurations */
	ret = gmu_pwrlevel_probe(gmu, node);
	if (ret)
		goto error;

	gmu->num_gpupwrlevels = pwr->num_pwrlevels;

	for (i = 0; i < gmu->num_gpupwrlevels; i++) {
		int j = gmu->num_gpupwrlevels - 1 - i;

		gmu->gpu_freqs[i] = pwr->pwrlevels[j].gpu_freq;
	}

	/* Initialize GPU b/w levels configuration */
	ret = gmu_gpu_bw_probe(gmu);
	if (ret)
		goto error;

	/* Initialize GMU CNOC b/w levels configuration */
	ret = gmu_cnoc_bw_probe(gmu);
	if (ret)
		goto error;

	/* Populate RPMh configurations */
	ret = gmu_rpmh_init(gmu, pwr);
	if (ret)
		goto error;

	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);

	gmu->idle_level = GPU_HW_CGC;

	return 0;

error:
	gmu_remove(device);
	return ret;
}

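/* Set the GMU core clock to its default rate and enable all GMU clocks */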
static int gmu_enable_clks(struct gmu_device *gmu)
{
	int ret, j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
		return -EINVAL;

	ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
	if (ret) {
		dev_err(&gmu->pdev->dev, "fail to set default GMU clk freq %d\n",
				gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
		return ret;
	}

	while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
		ret = clk_prepare_enable(gmu->clks[j]);
		if (ret) {
			dev_err(&gmu->pdev->dev,
				"fail to enable gpucc clk idx %d\n",
				j);
			return ret;
		}
		j++;
	}

	set_bit(GMU_CLK_ON, &gmu->flags);
	return 0;
}

static int gmu_disable_clks(struct gmu_device *gmu)
{
	int ret, j = 0;

	if (IS_ERR_OR_NULL(gmu->clks[0]))
		return 0;

	ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[0]);
	if (ret) {
		dev_err(&gmu->pdev->dev, "fail to reset GMU clk freq %d\n",
				gmu->gmu_freqs[0]);
		return ret;
	}

	while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
		clk_disable_unprepare(gmu->clks[j]);
		gmu->clks[j] = NULL;
		j++;
	}

	clear_bit(GMU_CLK_ON, &gmu->flags);
	return 0;
}

static int gmu_enable_gdsc(struct gmu_device *gmu)
{
	int ret;

	if (IS_ERR_OR_NULL(gmu->cx_gdsc))
		return 0;

	ret = regulator_enable(gmu->cx_gdsc);
	if (ret)
		dev_err(&gmu->pdev->dev,
			"Failed to enable GMU CX gdsc, error %d\n", ret);

	return ret;
}

static int gmu_disable_gdsc(struct gmu_device *gmu)
{
	int ret;

	if (IS_ERR_OR_NULL(gmu->cx_gdsc))
		return 0;

	ret = regulator_disable(gmu->cx_gdsc);
	if (ret)
		dev_err(&gmu->pdev->dev,
			"Failed to disable GMU CX gdsc, error %d\n", ret);

	return ret;
}

/* To be called to power on both GPU and GMU */
int gmu_start(struct kgsl_device *device)
{
	int ret = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;
	int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;

	if (!kgsl_gmu_isenabled(device))
		return 0;

	if (test_bit(GMU_CLK_ON, &gmu->flags))
		return 0;

	ret = gmu_enable_gdsc(gmu);
	if (ret)
		return ret;

	gmu_enable_clks(gmu);

	if (device->state == KGSL_STATE_INIT ||
			device->state == KGSL_STATE_SUSPEND) {
		/* Convert to RPMh frequency index */
		int perf_idx = gmu->num_gpupwrlevels -
				pwr->default_pwrlevel - 1;

		/* Vote for 300MHz DDR for GMU to init */
		ret = msm_bus_scale_client_update_request(gmu->pcl,
				bus_level);
		if (ret) {
			dev_err(&gmu->pdev->dev,
					"Failed to allocate gmu b/w\n");
			goto error_clks;
		}

		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
				GMU_COLD_BOOT, 0);
		if (ret)
			goto error_bus;

		enable_irq(hfi->hfi_interrupt_num);
		enable_irq(gmu->gmu_interrupt_num);

		ret = hfi_start(gmu, GMU_COLD_BOOT);
		if (ret)
			goto error_gpu;

		/* Send default DCVS level */
		ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
		if (ret)
			goto error_gpu;
	} else {
		int perf_idx = gmu->num_gpupwrlevels - gmu->wakeup_pwrlevel - 1;

		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
				GMU_WARM_BOOT, 0);
		if (ret)
			goto error_clks;

		enable_irq(hfi->hfi_interrupt_num);
		enable_irq(gmu->gmu_interrupt_num);

		ret = hfi_start(gmu, GMU_WARM_BOOT);
		if (ret)
			goto error_gpu;

		if (gmu->wakeup_pwrlevel != pwr->default_pwrlevel) {
			ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
			if (ret)
				goto error_gpu;
			gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
		}
	}

	/*
	 * OOB to enable power management of GMU.
	 * In v2, this function call shall move ahead
	 * of hfi_start() to save power.
	 */
	ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
			OOB_CPINIT_CHECK_MASK, OOB_CPINIT_CLEAR_MASK);
	gpudev->oob_clear(adreno_dev, OOB_CPINIT_CLEAR_MASK);

	if (ret)
		goto error_gpu;

	if (device->state == KGSL_STATE_INIT ||
			device->state == KGSL_STATE_SUSPEND) {
		msm_bus_scale_client_update_request(gmu->pcl, 0);
		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
			gpudev->oob_clear(adreno_dev,
					OOB_BOOT_SLUMBER_CLEAR_MASK);
	}

	return 0;

error_gpu:
	hfi_stop(gmu);
	disable_irq(gmu->gmu_interrupt_num);
	disable_irq(hfi->hfi_interrupt_num);
	if (device->state == KGSL_STATE_INIT ||
			device->state == KGSL_STATE_SUSPEND) {
		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
			gpudev->oob_clear(adreno_dev,
					OOB_BOOT_SLUMBER_CLEAR_MASK);
	}
	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
error_bus:
	if (device->state == KGSL_STATE_INIT ||
			device->state == KGSL_STATE_SUSPEND)
		msm_bus_scale_client_update_request(gmu->pcl, 0);
error_clks:
	gmu_disable_clks(gmu);
	gmu_disable_gdsc(gmu);
	return ret;
}

/* Caller shall ensure GPU is ready for SLUMBER */
void gmu_stop(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (!test_bit(GMU_CLK_ON, &gmu->flags))
		return;

	/* TODO: Check for conditions to enter slumber */

	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);

	/* Pending messages in all queues are abandoned */
	hfi_stop(gmu);
	clear_bit(GMU_HFI_ON, &gmu->flags);

	disable_irq(gmu->gmu_interrupt_num);
	disable_irq(hfi->hfi_interrupt_num);

	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
	gmu_disable_clks(gmu);
	gmu_disable_gdsc(gmu);

	/* TODO: Vote CX, MX retention off */

	msm_bus_scale_client_update_request(gmu->pcl, 0);
}

void gmu_remove(struct kgsl_device *device)
{
	struct gmu_device *gmu = &device->gmu;
	struct kgsl_hfi *hfi = &gmu->hfi;
	int i;

	if (!device->gmu.pdev)
		return;

	tasklet_kill(&hfi->tasklet);

	gmu_stop(device);

	disable_irq(gmu->gmu_interrupt_num);
	disable_irq(hfi->hfi_interrupt_num);
	devm_free_irq(&gmu->pdev->dev,
			gmu->gmu_interrupt_num, gmu);
	devm_free_irq(&gmu->pdev->dev,
			hfi->hfi_interrupt_num, gmu);

	if (gmu->ccl) {
		msm_bus_scale_unregister_client(gmu->ccl);
		gmu->ccl = 0;
	}

	if (gmu->pcl) {
		msm_bus_scale_unregister_client(gmu->pcl);
		gmu->pcl = 0;
	}

	if (gmu->pdc_reg_virt) {
		devm_iounmap(&gmu->pdev->dev, gmu->pdc_reg_virt);
		gmu->pdc_reg_virt = NULL;
	}

	if (gmu->reg_virt) {
		devm_iounmap(&gmu->pdev->dev, gmu->reg_virt);
		devm_release_mem_region(&gmu->pdev->dev,
				gmu->reg_phys, gmu->reg_len);
		gmu->reg_virt = NULL;
	}

	if (gmu->hfi_mem || gmu->dump_mem)
		gmu_memory_close(&device->gmu);

	for (i = 0; i < MAX_GMU_CLKS; i++) {
		if (gmu->clks[i]) {
			devm_clk_put(&gmu->pdev->dev, gmu->clks[i]);
			gmu->clks[i] = NULL;
		}
	}

	if (gmu->gx_gdsc) {
		devm_regulator_put(gmu->gx_gdsc);
		gmu->gx_gdsc = NULL;
	}

	if (gmu->cx_gdsc) {
		devm_regulator_put(gmu->cx_gdsc);
		gmu->cx_gdsc = NULL;
	}

	device->gmu.pdev = NULL;
}