1/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/types.h>
14#include <linux/device.h>
15#include <linux/iommu.h>
16#include <linux/of_platform.h>
17#include <linux/msm-bus.h>
18#include <linux/msm-bus-board.h>
19#include <linux/pm_opp.h>
20#include <soc/qcom/cmd-db.h>
21
22#include "kgsl_device.h"
23#include "kgsl_gmu.h"
24#include "kgsl_hfi.h"
25#include "a6xx_reg.h"
26#include "adreno.h"
27
28#define GMU_CONTEXT_USER 0
29#define GMU_CONTEXT_KERNEL 1
30#define GMU_KERNEL_ENTRIES 8
31
32enum gmu_iommu_mem_type {
33 GMU_CACHED_CODE,
34 GMU_CACHED_DATA,
35 GMU_NONCACHED_KERNEL,
36 GMU_NONCACHED_USER
37};
38
39/*
40 * GMU virtual memory mapping definitions
41 */
42struct gmu_vma {
43 unsigned int noncached_ustart;
44 unsigned int noncached_usize;
45 unsigned int noncached_kstart;
46 unsigned int noncached_ksize;
47 unsigned int cached_dstart;
48 unsigned int cached_dsize;
49 unsigned int cached_cstart;
50 unsigned int cached_csize;
51 unsigned int image_start;
52};
53
54struct gmu_iommu_context {
55 const char *name;
56 struct device *dev;
57 struct iommu_domain *domain;
58};
59
60#define HFIMEM_SIZE SZ_16K
61
62#define DUMPMEM_SIZE SZ_16K
63
64/* Define target specific GMU VMA configurations */
65static const struct gmu_vma vma = {
66 /* Noncached user segment */
67 0x80000000, SZ_1G,
68 /* Noncached kernel segment */
69 0x60000000, SZ_512M,
70 /* Cached data segment */
71 0x44000, (SZ_256K-SZ_16K),
72 /* Cached code segment */
73 0x0, (SZ_256K-SZ_16K),
74 /* FW image */
75 0x0,
76};
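/*
 * Illustrative summary of the table above: the GMU virtual address space
 * is carved into a cached code segment at 0x0 and a cached data segment at
 * 0x44000 (each SZ_256K - SZ_16K), a noncached kernel segment at 0x60000000
 * (512MB, split into 1MB slots by the allocator below) and a noncached user
 * segment at 0x80000000 (1GB). The FW image is loaded at the start of the
 * code segment.
 */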
77
78struct gmu_iommu_context gmu_ctx[] = {
79 [GMU_CONTEXT_USER] = { .name = "gmu_user" },
80 [GMU_CONTEXT_KERNEL] = { .name = "gmu_kernel" }
81};
82
83/*
84 * There are a few static memory buffers that are allocated and mapped at boot
85 * time for GMU to function. The buffers are permanent (not freed) after
86 * GPU boot. The sizes of the buffers are constant and not expected to change.
87 *
88 * We define an array and a simple allocator to keep track of the currently
89 * active SMMU entries of GMU kernel mode context. Each entry is assigned
90 * a unique address inside GMU kernel mode address range. The addresses
91 * are assigned sequentially and aligned to 1MB each.
92 *
93 */
94static struct gmu_memdesc gmu_kmem_entries[GMU_KERNEL_ENTRIES];
95static unsigned long gmu_kmem_bitmap;
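
/*
 * Sketch of how the entries above are used (see allocate_gmu_kmem() and
 * gmu_kmem_close() below): slot i of gmu_kmem_entries[] is mapped at GMU
 * virtual address vma.noncached_kstart + (i * SZ_1M), and gmu_kmem_bitmap
 * tracks which of the GMU_KERNEL_ENTRIES slots are in use via
 * find_first_zero_bit()/set_bit()/clear_bit().
 */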
96
97/*
98 * kgsl_gmu_isenabled() - Check if there is a GMU and it is enabled
99 * @device: Pointer to the KGSL device that owns the GMU
100 *
101 * Check if a GMU has been found and successfully probed. Also
102 * check that the feature flag to use a GMU is enabled. Returns
103 * true if both of these conditions are met, otherwise false.
104 */
105bool kgsl_gmu_isenabled(struct kgsl_device *device)
106{
107 struct gmu_device *gmu = &device->gmu;
108 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
109
110 if (gmu->pdev && ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
111 return true;
112
113 return false;
114}
115
116static int _gmu_iommu_fault_handler(struct device *dev,
117 unsigned long addr, int flags, const char *name)
118{
119 char *fault_type = "unknown";
120
121 if (flags & IOMMU_FAULT_TRANSLATION)
122 fault_type = "translation";
123 else if (flags & IOMMU_FAULT_PERMISSION)
124 fault_type = "permission";
125
126 dev_err(dev, "GMU fault addr = %lX, context=%s (%s %s fault)\n",
127 addr, name,
128 (flags & IOMMU_FAULT_WRITE) ? "write" : "read",
129 fault_type);
130
131 return 0;
132}
133
134static int gmu_kernel_fault_handler(struct iommu_domain *domain,
135 struct device *dev, unsigned long addr, int flags, void *token)
136{
137 return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_kernel");
138}
139
140static int gmu_user_fault_handler(struct iommu_domain *domain,
141 struct device *dev, unsigned long addr, int flags, void *token)
142{
143 return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_user");
144}
145
146static void free_gmu_mem(struct gmu_device *gmu,
147 struct gmu_memdesc *md)
148{
149 /* Free the GMU memory if it has been allocated */
150 if (md->hostptr)
151 dma_free_attrs(&gmu->pdev->dev, (size_t) md->size,
152 (void *)md->hostptr, md->physaddr, 0);
153 memset(md, 0, sizeof(*md));
154}
155
156static int alloc_and_map(struct gmu_device *gmu, unsigned int ctx_id,
157 struct gmu_memdesc *md, unsigned int attrs)
158{
159 int ret;
160 struct iommu_domain *domain;
161
162 domain = gmu_ctx[ctx_id].domain;
163
164 md->hostptr = dma_alloc_attrs(&gmu->pdev->dev, (size_t) md->size,
165 &md->physaddr, GFP_KERNEL, 0);
166
167 if (md->hostptr == NULL)
168 return -ENOMEM;
169
170 ret = iommu_map(domain, md->gmuaddr,
171 md->physaddr, md->size,
172 attrs);
173
174 if (ret) {
175 dev_err(&gmu->pdev->dev,
176 "gmu map err: gaddr=0x%016llX, paddr=0x%016llX\n",
177 md->gmuaddr, md->physaddr);
178 free_gmu_mem(gmu, md);
179 }
180
181 return ret;
182}
183
184/*
185 * allocate_gmu_image() - allocates and maps memory for the FW image. The
186 * size comes from the loaded firmware file and should be smaller than the
187 * code cache size, otherwise the FW may suffer performance issues.
188 * @gmu: Pointer to GMU device
189 * @size: Requested allocation size
190 */
191int allocate_gmu_image(struct gmu_device *gmu, unsigned int size)
192{
193 struct gmu_memdesc *md = &gmu->fw_image;
194
195 if (size > vma.cached_csize) {
196 dev_err(&gmu->pdev->dev,
197 "GMU firmware size too big: %d\n", size);
198 return -EINVAL;
199 }
200
201 md->size = size;
202 md->gmuaddr = vma.image_start;
203 md->attr = GMU_CACHED_CODE;
204
205 return alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, IOMMU_READ);
206}
207
208/*
209 * allocate_gmu_kmem() - allocates and maps GMU kernel shared memory
210 * @gmu: Pointer to GMU device
211 * @size: Requested size
212 * @attrs: IOMMU mapping attributes
213 */
214static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
215 unsigned int size, unsigned int attrs)
216{
217 struct gmu_memdesc *md;
218 int ret, entry_idx = find_first_zero_bit(
219 &gmu_kmem_bitmap, GMU_KERNEL_ENTRIES);
220
221 size = PAGE_ALIGN(size);
222
223 if (size > SZ_1M || size == 0) {
224 dev_err(&gmu->pdev->dev,
225 "Requested %d bytes of GMU kernel memory, max=1MB\n",
226 size);
227 return ERR_PTR(-EINVAL);
228 }
229
230 if (entry_idx >= GMU_KERNEL_ENTRIES) {
231 dev_err(&gmu->pdev->dev,
232 "Ran out of GMU kernel mempool slots\n");
233 return ERR_PTR(-EINVAL);
234 }
235
236 /* Allocate GMU virtual memory */
237 md = &gmu_kmem_entries[entry_idx];
238 md->gmuaddr = vma.noncached_kstart + (entry_idx * SZ_1M);
239 set_bit(entry_idx, &gmu_kmem_bitmap);
240 md->attr = GMU_NONCACHED_KERNEL;
241 md->size = size;
242
243 ret = alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, attrs);
244
245 if (ret) {
246 clear_bit(entry_idx, &gmu_kmem_bitmap);
247 md->gmuaddr = 0;
248 return ERR_PTR(ret);
249 }
250
251 return md;
252}
253
254static int gmu_iommu_cb_probe(struct gmu_device *gmu,
255 struct gmu_iommu_context *ctx,
256 struct device_node *node)
257{
258 struct platform_device *pdev = of_find_device_by_node(node);
259 struct device *dev;
260 int ret;
261
262 dev = &pdev->dev;
263
264 ctx->dev = dev;
265 ctx->domain = iommu_domain_alloc(&platform_bus_type);
266 if (ctx->domain == NULL) {
267 dev_err(&gmu->pdev->dev, "gmu iommu fail to alloc %s domain\n",
268 ctx->name);
269 return -ENODEV;
270 }
271
272 ret = iommu_attach_device(ctx->domain, dev);
273 if (ret) {
274 dev_err(&gmu->pdev->dev, "gmu iommu fail to attach %s device\n",
275 ctx->name);
276 iommu_domain_free(ctx->domain);
277 }
278
279 return ret;
280}
281
282static struct {
283 const char *compatible;
284 int index;
285 iommu_fault_handler_t hdlr;
286} cbs[] = {
287 { "qcom,smmu-gmu-user-cb",
288 GMU_CONTEXT_USER,
289 gmu_user_fault_handler,
290 },
291 { "qcom,smmu-gmu-kernel-cb",
292 GMU_CONTEXT_KERNEL,
293 gmu_kernel_fault_handler,
294 },
295};
296
297/*
298 * gmu_iommu_init() - probe IOMMU context banks used by GMU
299 * and attach GMU device
300 * @gmu: Pointer to GMU device
301 * @node: Pointer to GMU device node
302 */
303int gmu_iommu_init(struct gmu_device *gmu, struct device_node *node)
304{
305 struct device_node *child;
306 struct gmu_iommu_context *ctx = NULL;
307 int ret, i;
308
309 of_platform_populate(node, NULL, NULL, &gmu->pdev->dev);
310
311 for (i = 0; i < ARRAY_SIZE(cbs); i++) {
312 child = of_find_compatible_node(node, NULL, cbs[i].compatible);
313 if (child) {
314 ctx = &gmu_ctx[cbs[i].index];
315 ret = gmu_iommu_cb_probe(gmu, ctx, child);
316 if (ret)
317 return ret;
318 iommu_set_fault_handler(ctx->domain,
319 cbs[i].hdlr, ctx);
320 }
321 }
322
323 for (i = 0; i < ARRAY_SIZE(gmu_ctx); i++) {
324 if (gmu_ctx[i].domain == NULL) {
325 dev_err(&gmu->pdev->dev,
326 "Missing GMU %s context bank node\n",
327 gmu_ctx[i].name);
328 return -EINVAL;
329 }
330 }
331
332 return 0;
333}
334
335/*
336 * gmu_kmem_close() - free all kernel memory allocated for GMU and detach GMU
337 * from IOMMU context banks.
338 * @gmu: Pointer to GMU device
339 */
340void gmu_kmem_close(struct gmu_device *gmu)
341{
342 int i;
343 struct gmu_memdesc *md = &gmu->fw_image;
344 struct gmu_iommu_context *ctx = &gmu_ctx[GMU_CONTEXT_KERNEL];
345
346 /* Free GMU image memory */
347 free_gmu_mem(gmu, md);
348
349 /* Unmap image memory */
350 iommu_unmap(ctx->domain,
351 gmu->fw_image.gmuaddr,
352 gmu->fw_image.size);
353
354
355 gmu->hfi_mem = NULL;
356 gmu->dump_mem = NULL;
357
358 /* Unmap all memories in GMU kernel memory pool */
359 for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
360 struct gmu_memdesc *memptr = &gmu_kmem_entries[i];
361
362 if (memptr->gmuaddr)
363 iommu_unmap(ctx->domain, memptr->gmuaddr, memptr->size);
364 }
365
366 /* Free GMU shared kernel memory */
367 for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
368 md = &gmu_kmem_entries[i];
369 free_gmu_mem(gmu, md);
370 clear_bit(i, &gmu_kmem_bitmap);
371 }
372
373 /* Detach the device from SMMU context bank */
374 iommu_detach_device(ctx->domain, ctx->dev);
375
376 /* free kernel mem context */
377 iommu_domain_free(ctx->domain);
378}
379
380void gmu_memory_close(struct gmu_device *gmu)
381{
382 gmu_kmem_close(gmu);
383 /* Free user memory context */
384 iommu_domain_free(gmu_ctx[GMU_CONTEXT_USER].domain);
385
386}
387
388/*
389 * gmu_memory_probe() - probe GMU IOMMU context banks and allocate memory
390 * to share with GMU in kernel mode.
391 * @gmu: Pointer to GMU device
392 * @node: Pointer to GMU device node
393 */
394int gmu_memory_probe(struct gmu_device *gmu, struct device_node *node)
395{
396 int ret;
397
398 ret = gmu_iommu_init(gmu, node);
399 if (ret)
400 return ret;
401
402 /* Allocates & maps memory for HFI */
403 gmu->hfi_mem = allocate_gmu_kmem(gmu, HFIMEM_SIZE,
404 (IOMMU_READ | IOMMU_WRITE));
405 if (IS_ERR(gmu->hfi_mem)) {
406 ret = PTR_ERR(gmu->hfi_mem);
407 goto err_ret;
408 }
409
410 /* Allocates & maps GMU crash dump memory */
411 gmu->dump_mem = allocate_gmu_kmem(gmu, DUMPMEM_SIZE,
412 (IOMMU_READ | IOMMU_WRITE));
413 if (IS_ERR(gmu->dump_mem)) {
414 ret = PTR_ERR(gmu->dump_mem);
415 goto err_ret;
416 }
417
418 return 0;
419err_ret:
420 gmu_memory_close(gmu);
421 return ret;
422}
423
424/*
425 * gmu_dcvs_set() - request GMU to change GPU frequency and/or bandwidth.
426 * @gmu: Pointer to GMU device
427 * @gpu_pwrlevel: index to GPU DCVS table used by KGSL
428 * @bus_level: index to GPU bus table used by KGSL
429 *
430 * The function converts the GPU power level and bus level indices used
431 * by KGSL to the indices used by GMU/RPMh.
432 */
433int gmu_dcvs_set(struct gmu_device *gmu,
434 unsigned int gpu_pwrlevel, unsigned int bus_level)
435{
436 struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
437 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
438 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
439 int perf_idx = INVALID_DCVS_IDX, bw_idx = INVALID_DCVS_IDX;
440
441 if (gpu_pwrlevel < gmu->num_gpupwrlevels)
442 perf_idx = gmu->num_gpupwrlevels - gpu_pwrlevel - 1;
443
444 if (bus_level < gmu->num_bwlevels)
445 bw_idx = bus_level;
446
447 if ((perf_idx == INVALID_DCVS_IDX) &&
448 (bw_idx == INVALID_DCVS_IDX))
449 return -EINVAL;
450
451 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
452 return gpudev->rpmh_gpu_pwrctrl(adreno_dev,
453 GMU_DCVS_NOHFI, perf_idx, bw_idx);
454
455 return hfi_send_dcvs_vote(gmu, perf_idx, bw_idx, ACK_NONBLOCK);
456}
457
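/*
 * Example of the index conversion in gmu_dcvs_set() above, with
 * illustrative numbers: if num_gpupwrlevels is 4, KGSL gpu_pwrlevel 0
 * (the highest level) maps to perf_idx 3 and gpu_pwrlevel 3 maps to
 * perf_idx 0, i.e. the GMU/RPMh table is indexed in the reverse order of
 * the KGSL power level table. Bus levels are passed through unchanged.
 */
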
458struct rpmh_arc_vals {
459 unsigned int num;
460 uint16_t val[MAX_GX_LEVELS];
461};
462
463static const char gfx_res_id[] = "gfx.lvl";
464static const char cx_res_id[] = "cx.lvl";
465static const char mx_res_id[] = "mx.lvl";
466
467enum rpmh_vote_type {
468 GPU_ARC_VOTE = 0,
469 GMU_ARC_VOTE,
470 INVALID_ARC_VOTE,
471};
472
473static const char debug_strs[][8] = {
474 [GPU_ARC_VOTE] = "gpu",
475 [GMU_ARC_VOTE] = "gmu",
476};
477
478/*
479 * rpmh_arc_cmds() - query RPMh command database for GX/CX/MX rail
480 * VLVL tables. The index of table will be used by GMU to vote rail
481 * voltage.
482 *
483 * @gmu: Pointer to GMU device
484 * @arc: Pointer to RPMh rail controller (ARC) voltage table
485 * @res_id: Pointer to 8 char array that contains rail name
486 */
487static int rpmh_arc_cmds(struct gmu_device *gmu,
488 struct rpmh_arc_vals *arc, const char *res_id)
489{
490 unsigned int len;
491
492 len = cmd_db_get_aux_data_len(res_id);
493
494 if (len > (MAX_GX_LEVELS << 1)) {
495 /* CmdDB VLVL table size in bytes is too large */
496 dev_err(&gmu->pdev->dev,
497 "gfx cmddb size %d larger than alloc buf %d of %s\n",
498 len, (MAX_GX_LEVELS << 1), res_id);
499 return -EINVAL;
500 }
501
502 cmd_db_get_aux_data(res_id, (uint8_t *)arc->val, len);
503 arc->num = len >> 1;
504
505 return 0;
506}
507
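/*
 * Note on rpmh_arc_cmds() above: cmd_db_get_aux_data() returns the VLVL
 * table as a blob of 16-bit entries, which is why the length is checked
 * against MAX_GX_LEVELS << 1 bytes and arc->num is computed as len >> 1.
 */
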
508/*
509 * setup_volt_dependency_tbl() - set up GX->MX or CX->MX rail voltage
510 * dependencies. The second rail voltage must be equal to or higher than
511 * the primary rail voltage. The VLVL table index is used by RPMh for PMIC
512 * voltage setting.
513 * @votes: Pointer to an ARC vote descriptor
514 * @pri_rail: Pointer to primary power rail VLVL table
515 * @sec_rail: Pointer to second/dependent power rail VLVL table
516 * @vlvl: Pointer to VLVL table being used by GPU or GMU driver, a subset
517 * of pri_rail VLVL table
518 * @num_entries: Valid number of entries in table pointed by "vlvl" parameter
519 */
520static int setup_volt_dependency_tbl(struct arc_vote_desc *votes,
521 struct rpmh_arc_vals *pri_rail, struct rpmh_arc_vals *sec_rail,
522 unsigned int *vlvl, unsigned int num_entries)
523{
524 int i, j, k;
525 uint16_t cur_vlvl;
526
527 /* i tracks current KGSL GPU frequency table entry
528 * j tracks second rail voltage table entry
529 * k tracks primary rail voltage table entry
530 */
531 for (i = 0, k = 0; i < num_entries; k++) {
532 if (pri_rail->val[k] != vlvl[i]) {
533 if (k >= pri_rail->num)
534 return -EINVAL;
535 continue;
536 }
537 votes[i].pri_idx = k;
538 votes[i].vlvl = vlvl[i];
539 cur_vlvl = vlvl[i];
540
541 /* find the index of the second rail vlvl array element
542 * whose vlvl is >= the current vlvl of the primary rail
543 */
544 for (j = 0; j < sec_rail->num; j++) {
545 if (sec_rail->val[j] >= cur_vlvl) {
546 votes[i].sec_idx = j;
547 break;
548 }
549 }
550
551 if (j == sec_rail->num)
552 votes[i].sec_idx = j;
553
554 i++;
555 }
556 return 0;
557}
558
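/*
 * Worked example for setup_volt_dependency_tbl() above, using hypothetical
 * tables: pri_rail->val = {0, 128, 256, 384}, sec_rail->val = {0, 64, 256}
 * and vlvl = {0, 256}. Entry 0 (vlvl 0) gets pri_idx 0 and sec_idx 0;
 * entry 1 (vlvl 256) gets pri_idx 2 and sec_idx 2, the first sec_rail
 * level that is >= 256. When no sec_rail level is high enough, sec_idx is
 * set to sec_rail->num.
 */
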
559/*
560 * rpmh_arc_votes_init() - initialize RPMh votes needed for rail voltage
561 * scaling by GMU.
562 * @gmu: Pointer to GMU device
563 * @pri_rail: Pointer to primary power rail VLVL table
564 * @sec_rail: Pointer to second/dependent power rail VLVL table
565 * (the dependent rail, MX in practice)
566 * @type: the type of the primary rail, GPU or GMU
567 */
568static int rpmh_arc_votes_init(struct gmu_device *gmu,
569 struct rpmh_arc_vals *pri_rail,
570 struct rpmh_arc_vals *sec_rail,
571 unsigned int type)
572{
573 unsigned int num_freqs;
574 struct arc_vote_desc *votes;
575 unsigned int vlvl_tbl[MAX_GX_LEVELS];
576 unsigned int *freq_tbl;
577 int i, ret;
578 /*
579 * FIXME: remove below two arrays after OPP VLVL query API ready
580 * struct dev_pm_opp *opp;
581 */
582 uint16_t gpu_vlvl[] = {0, 128, 256, 384};
583 uint16_t cx_vlvl[] = {0, 48, 256};
584
585 if (type == GPU_ARC_VOTE) {
586 num_freqs = gmu->num_gpupwrlevels;
587 votes = gmu->rpmh_votes.gx_votes;
588 freq_tbl = gmu->gpu_freqs;
589 } else if (type == GMU_ARC_VOTE) {
590 num_freqs = gmu->num_gmupwrlevels;
591 votes = gmu->rpmh_votes.cx_votes;
592 freq_tbl = gmu->gmu_freqs;
593 } else {
594 return -EINVAL;
595 }
596
597 if (num_freqs > pri_rail->num) {
598 dev_err(&gmu->pdev->dev,
599 "%s defined more DCVS levels than RPMh can support\n",
600 debug_strs[type]);
601 return -EINVAL;
602 }
603
604 /*
605 * FIXME: Find a core's voltage VLVL value based on its frequency
606 * using OPP framework, waiting for David Colin, ETA Jan.
607 */
608 for (i = 0; i < num_freqs; i++) {
609 /*
610 * opp = dev_pm_opp_find_freq_exact(&gmu->pdev->dev,
611 * freq_tbl[i], true);
612 * if (IS_ERR(opp)) {
613 * dev_err(&gmu->pdev->dev,
614 * "Failed to find opp freq %d of %s\n",
615 * freq_tbl[i], debug_strs[type]);
616 * return PTR_ERR(opp);
617 * }
618 * vlvl_tbl[i] = dev_pm_opp_get_voltage(opp);
619 */
620 if (type == GPU_ARC_VOTE)
621 vlvl_tbl[i] = gpu_vlvl[i];
622 else
623 vlvl_tbl[i] = cx_vlvl[i];
624 }
625
626 ret = setup_volt_dependency_tbl(votes,
627 pri_rail, sec_rail, vlvl_tbl, num_freqs);
628
629 if (ret)
630 dev_err(&gmu->pdev->dev, "%s rail volt failed to match DT freqs\n",
631 debug_strs[type]);
632
633 return ret;
634}
635
636/*
637 * build_rpmh_bw_votes() - build TCS commands to vote for bandwidth.
638 * Each command sets frequency of a node along path to DDR or CNOC.
639 * @rpmh_vote: Pointer to RPMh vote needed by GMU to set BW via RPMh
640 * @num_usecases: Number of BW use cases (or BW levels)
641 * @handle: Provided by bus driver. It contains TCS command sets for
642 * all BW use cases of a bus client.
643 */
644static void build_rpmh_bw_votes(struct gmu_bw_votes *rpmh_vote,
645 unsigned int num_usecases, struct msm_bus_tcs_handle handle)
646{
647 struct msm_bus_tcs_usecase *tmp;
648 int i, j;
649
650 for (i = 0; i < num_usecases; i++) {
651 tmp = &handle.usecases[i];
652 for (j = 0; j < tmp->num_cmds; j++) {
653 if (!i) {
654 /*
655 * The wait bitmask and TCS command addresses are
656 * the same for all bw use cases. To save data volume
657 * exchanged between the driver and the GMU, only
658 * transfer the bitmask and TCS command addresses
659 * of the first bw use case.
660 */
661 rpmh_vote->cmds_per_bw_vote = tmp->num_cmds;
662 rpmh_vote->cmds_wait_bitmask =
663 tmp->cmds[j].complete ?
664 rpmh_vote->cmds_wait_bitmask
665 | BIT(j)
666 : rpmh_vote->cmds_wait_bitmask
667 & (~BIT(j));
668 rpmh_vote->cmd_addrs[j] = tmp->cmds[j].addr;
669 }
670 rpmh_vote->cmd_data[i][j] = tmp->cmds[j].data;
671 }
672 }
673}
674
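/*
 * Sketch of the gmu_bw_votes layout produced above: cmds_per_bw_vote and
 * cmd_addrs[] are filled in once from use case 0, since the TCS command
 * addresses are identical across use cases, cmds_wait_bitmask marks which
 * commands require a completion wait, and cmd_data[use_case][cmd] carries
 * the per-use-case data for every command.
 */
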
675/*
676 * gmu_bus_vote_init - initialize RPMh votes needed for bw scaling by GMU.
677 * @gmu: Pointer to GMU device
678 * @pwr: Pointer to KGSL power controller
679 */
680static int gmu_bus_vote_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
681{
682 struct msm_bus_tcs_usecase *usecases;
683 struct msm_bus_tcs_handle hdl;
684 struct rpmh_votes_t *votes = &gmu->rpmh_votes;
685 int ret;
686
687 usecases = kcalloc(gmu->num_bwlevels, sizeof(*usecases), GFP_KERNEL);
688 if (!usecases)
689 return -ENOMEM;
690
691 hdl.num_usecases = gmu->num_bwlevels;
692 hdl.usecases = usecases;
693
694 /*
695 * Query TCS command set for each use case defined in GPU b/w table
696 */
697 ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->pcl);
698 if (ret)
699 goto out;
700
701 build_rpmh_bw_votes(&votes->ddr_votes, gmu->num_bwlevels, hdl);
702
703 /*
704 * Query CNOC TCS command set for each use case defined in cnoc bw table
705 */
706 ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->ccl);
707 if (ret)
708 goto out;
709
710 build_rpmh_bw_votes(&votes->cnoc_votes, gmu->num_cnocbwlevels, hdl);
711out:
712 kfree(usecases);
713
714 return ret;
715}
716
717int gmu_rpmh_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
718{
719 struct rpmh_arc_vals gfx_arc, cx_arc, mx_arc;
720 int ret;
721
722 /* Populate BW vote table */
723 ret = gmu_bus_vote_init(gmu, pwr);
724 if (ret)
725 return ret;
726
727 /* Populate GPU and GMU frequency vote table */
728 ret = rpmh_arc_cmds(gmu, &gfx_arc, gfx_res_id);
729 if (ret)
730 return ret;
731
732 ret = rpmh_arc_cmds(gmu, &cx_arc, cx_res_id);
733 if (ret)
734 return ret;
735
736 ret = rpmh_arc_cmds(gmu, &mx_arc, mx_res_id);
737 if (ret)
738 return ret;
739
740 ret = rpmh_arc_votes_init(gmu, &gfx_arc, &mx_arc, GPU_ARC_VOTE);
741 if (ret)
742 return ret;
743
744 return rpmh_arc_votes_init(gmu, &cx_arc, &mx_arc, GMU_ARC_VOTE);
745}
746
747static irqreturn_t gmu_irq_handler(int irq, void *data)
748{
749 struct gmu_device *gmu = data;
750 struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
751 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
752 unsigned int status = 0;
753
754 adreno_read_gmureg(ADRENO_DEVICE(device),
755 ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS, &status);
756 adreno_write_gmureg(ADRENO_DEVICE(device),
757 ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, status);
758
759 /* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD WAKEUP interrupts */
760 if (status & GMU_INT_WDOG_BITE) {
761 dev_err_ratelimited(&gmu->pdev->dev,
762 "GMU watchdog expired interrupt received\n");
763 adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
764 adreno_dispatcher_schedule(device);
765 }
766 if (status & GMU_INT_HOST_AHB_BUS_ERR)
767 dev_err_ratelimited(&gmu->pdev->dev,
768 "AHB bus error interrupt received\n");
769 if (status & ~GMU_AO_INT_MASK)
770 dev_err_ratelimited(&gmu->pdev->dev,
771 "Unhandled GMU interrupts 0x%lx\n",
772 status & ~GMU_AO_INT_MASK);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800773
Kyle Piefere7b06b42017-04-06 13:53:01 -0700774 return IRQ_HANDLED;
775}
Kyle Pieferb1027b02017-02-10 13:58:58 -0800776
Kyle Piefere7b06b42017-04-06 13:53:01 -0700777static irqreturn_t hfi_irq_handler(int irq, void *data)
778{
779 struct kgsl_hfi *hfi = data;
780 struct gmu_device *gmu = container_of(hfi, struct gmu_device, hfi);
781 struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
782 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
783 unsigned int status = 0;
784
785 adreno_read_gmureg(ADRENO_DEVICE(device),
786 ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status);
787 adreno_write_gmureg(ADRENO_DEVICE(device),
788 ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status);
789
790 if (status & HFI_IRQ_MSGQ_MASK)
791 tasklet_hi_schedule(&hfi->tasklet);
792 if (status & HFI_IRQ_CM3_FAULT_MASK) {
793 dev_err_ratelimited(&gmu->pdev->dev,
794 "GMU CM3 fault interrupt received\n");
795 adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
796 adreno_dispatcher_schedule(device);
797 }
798 if (status & ~HFI_IRQ_MASK)
799 dev_err_ratelimited(&gmu->pdev->dev,
800 "Unhandled HFI interrupts 0x%x\n",
801 status & ~HFI_IRQ_MASK);
802
803 return IRQ_HANDLED;
804}
805
806static int gmu_pwrlevel_probe(struct gmu_device *gmu, struct device_node *node)
807{
808 struct device_node *pwrlevel_node, *child;
809
810 pwrlevel_node = of_find_node_by_name(node, "qcom,gmu-pwrlevels");
811
812 if (pwrlevel_node == NULL) {
813 dev_err(&gmu->pdev->dev, "Unable to find 'qcom,gmu-pwrlevels'\n");
814 return -EINVAL;
815 }
816
817 gmu->num_gmupwrlevels = 0;
818
819 for_each_child_of_node(pwrlevel_node, child) {
820 unsigned int index;
821
822 if (of_property_read_u32(child, "reg", &index))
823 return -EINVAL;
824
825 if (index >= MAX_CX_LEVELS) {
826 dev_err(&gmu->pdev->dev, "gmu pwrlevel %d is out of range\n",
827 index);
828 continue;
829 }
830
831 if (index >= gmu->num_gmupwrlevels)
832 gmu->num_gmupwrlevels = index + 1;
833
834 if (of_property_read_u32(child, "qcom,gmu-freq",
835 &gmu->gmu_freqs[index]))
836 return -EINVAL;
837 }
838
839 return 0;
840}
841
842static int gmu_reg_probe(struct gmu_device *gmu, const char *name, bool is_gmu)
843{
844 struct resource *res;
845
846 res = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM, name);
847 if (res == NULL) {
848 dev_err(&gmu->pdev->dev,
849 "platform_get_resource %s failed\n", name);
850 return -EINVAL;
851 }
852
853 if (res->start == 0 || resource_size(res) == 0) {
854 dev_err(&gmu->pdev->dev,
855 "dev %d %s invalid register region\n",
856 gmu->pdev->dev.id, name);
857 return -EINVAL;
858 }
859
860 if (is_gmu) {
861 gmu->reg_phys = res->start;
862 gmu->reg_len = resource_size(res);
863 gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
864 resource_size(res));
865
866 if (gmu->reg_virt == NULL) {
867 dev_err(&gmu->pdev->dev, "GMU regs ioremap failed\n");
868 return -ENODEV;
869 }
870
871 } else {
872 gmu->pdc_reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
873 resource_size(res));
874 if (gmu->pdc_reg_virt == NULL) {
875 dev_err(&gmu->pdev->dev, "PDC regs ioremap failed\n");
876 return -ENODEV;
877 }
878 }
879
880 return 0;
881}
882
883static int gmu_clocks_probe(struct gmu_device *gmu, struct device_node *node)
884{
885 const char *cname;
886 struct property *prop;
887 struct clk *c;
888 int i = 0;
889
890 of_property_for_each_string(node, "clock-names", prop, cname) {
891 c = devm_clk_get(&gmu->pdev->dev, cname);
892
893 if (IS_ERR(c)) {
894 dev_err(&gmu->pdev->dev,
895 "dt: Couldn't get GMU clock: %s\n", cname);
896 return PTR_ERR(c);
897 }
898
899 if (i >= MAX_GMU_CLKS) {
900 dev_err(&gmu->pdev->dev,
901 "dt: too many GMU clocks defined\n");
902 return -EINVAL;
903 }
904
905 gmu->clks[i++] = c;
906 }
907
908 return 0;
909}
910
911static int gmu_gpu_bw_probe(struct gmu_device *gmu)
912{
913 struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
914 struct msm_bus_scale_pdata *bus_scale_table;
915
916 bus_scale_table = msm_bus_cl_get_pdata(device->pdev);
917 if (bus_scale_table == NULL) {
918 dev_err(&gmu->pdev->dev, "dt: cannot get bus table\n");
919 return -ENODEV;
920 }
921
922 gmu->num_bwlevels = bus_scale_table->num_usecases;
923 gmu->pcl = msm_bus_scale_register_client(bus_scale_table);
924 if (!gmu->pcl) {
925 dev_err(&gmu->pdev->dev, "dt: cannot register bus client\n");
926 return -ENODEV;
927 }
928
929 return 0;
930}
931
932static int gmu_cnoc_bw_probe(struct gmu_device *gmu)
933{
934 struct msm_bus_scale_pdata *cnoc_table;
935
936 cnoc_table = msm_bus_cl_get_pdata(gmu->pdev);
937 if (cnoc_table == NULL) {
938 dev_err(&gmu->pdev->dev, "dt: cannot get cnoc table\n");
939 return -ENODEV;
940 }
941
942 gmu->num_cnocbwlevels = cnoc_table->num_usecases;
943 gmu->ccl = msm_bus_scale_register_client(cnoc_table);
944 if (!gmu->ccl) {
945 dev_err(&gmu->pdev->dev, "dt: cannot register cnoc client\n");
946 return -ENODEV;
947 }
948
949 return 0;
950}
951
952static int gmu_regulators_probe(struct gmu_device *gmu,
953 struct device_node *node)
954{
955 const char *name;
956 struct property *prop;
957 struct device *dev = &gmu->pdev->dev;
958 int ret = 0;
959
960 of_property_for_each_string(node, "regulator-names", prop, name) {
961 if (!strcmp(name, "vddcx")) {
962 gmu->cx_gdsc = devm_regulator_get(dev, name);
963 if (IS_ERR(gmu->cx_gdsc)) {
964 ret = PTR_ERR(gmu->cx_gdsc);
965 dev_err(dev, "dt: GMU couldn't get CX gdsc\n");
966 gmu->cx_gdsc = NULL;
967 return ret;
968 }
969 } else if (!strcmp(name, "vdd")) {
970 gmu->gx_gdsc = devm_regulator_get(dev, name);
971 if (IS_ERR(gmu->gx_gdsc)) {
972 ret = PTR_ERR(gmu->gx_gdsc);
973 dev_err(dev, "dt: GMU couldn't get GX gdsc\n");
974 gmu->gx_gdsc = NULL;
975 return ret;
976 }
977 } else {
978 dev_err(dev, "dt: Unknown GMU regulator: %s\n", name);
979 return -ENODEV;
980 }
981 }
982
983 return 0;
984}
985
986static int gmu_irq_probe(struct gmu_device *gmu)
987{
988 int ret;
989 struct kgsl_hfi *hfi = &gmu->hfi;
990
991 hfi->hfi_interrupt_num = platform_get_irq_byname(gmu->pdev,
992 "kgsl_hfi_irq");
993 ret = devm_request_irq(&gmu->pdev->dev,
994 hfi->hfi_interrupt_num,
995 hfi_irq_handler, IRQF_TRIGGER_HIGH,
996 "HFI", hfi);
997 if (ret) {
998 dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
999 hfi->hfi_interrupt_num, ret);
1000 return ret;
1001 }
1002
1003 gmu->gmu_interrupt_num = platform_get_irq_byname(gmu->pdev,
1004 "kgsl_gmu_irq");
1005 ret = devm_request_irq(&gmu->pdev->dev,
1006 gmu->gmu_interrupt_num,
1007 gmu_irq_handler, IRQF_TRIGGER_HIGH,
1008 "GMU", gmu);
1009 if (ret)
1010 dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
1011 gmu->gmu_interrupt_num, ret);
1012
1013 return ret;
1014}
1015
1016static void gmu_irq_enable(struct kgsl_device *device)
1017{
1018 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1019 struct gmu_device *gmu = &device->gmu;
1020 struct kgsl_hfi *hfi = &gmu->hfi;
1021
1022 /* Clear any pending IRQs before unmasking on GMU */
1023 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
1024 0xFFFFFFFF);
1025 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
1026 0xFFFFFFFF);
1027
1028 /* Unmask needed IRQs on GMU */
1029 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
1030 (unsigned int) ~HFI_IRQ_MASK);
1031 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
1032 (unsigned int) ~GMU_AO_INT_MASK);
1033
1034 /* Enable all IRQs on host */
1035 enable_irq(hfi->hfi_interrupt_num);
1036 enable_irq(gmu->gmu_interrupt_num);
1037}
1038
1039static void gmu_irq_disable(struct kgsl_device *device)
1040{
1041 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1042 struct gmu_device *gmu = &device->gmu;
1043 struct kgsl_hfi *hfi = &gmu->hfi;
1044
1045 /* Disable all IRQs on host */
1046 disable_irq(gmu->gmu_interrupt_num);
1047 disable_irq(hfi->hfi_interrupt_num);
1048
1049 /* Mask all IRQs on GMU */
1050 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
1051 0xFFFFFFFF);
1052 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
1053 0xFFFFFFFF);
1054
1055 /* Clear any pending IRQs before disabling */
1056 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
1057 0xFFFFFFFF);
1058 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
1059 0xFFFFFFFF);
1060}
1061
1062/* Do not access any GMU registers in GMU probe function */
1063int gmu_probe(struct kgsl_device *device)
1064{
1065 struct device_node *node;
1066 struct gmu_device *gmu = &device->gmu;
1067 struct gmu_memdesc *mem_addr = NULL;
1068 struct kgsl_hfi *hfi = &gmu->hfi;
1069 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
1070 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1071 int i = 0, ret = -ENXIO;
1072
1073 node = of_find_compatible_node(device->pdev->dev.of_node,
1074 NULL, "qcom,gpu-gmu");
1075
1076 if (node == NULL)
1077 return ret;
1078
1079 device->gmu.pdev = of_find_device_by_node(node);
1080
1081 /* Set up GMU regulators */
1082 ret = gmu_regulators_probe(gmu, node);
1083 if (ret)
1084 goto error;
1085
1086 /* Set up GMU clocks */
1087 ret = gmu_clocks_probe(gmu, node);
1088 if (ret)
1089 goto error;
1090
1091 /* Set up GMU IOMMU and shared memory with GMU */
1092 ret = gmu_memory_probe(&device->gmu, node);
1093 if (ret)
1094 goto error;
1095 mem_addr = gmu->hfi_mem;
1096
1097 /* Map and reserve GMU CSR registers */
1098 ret = gmu_reg_probe(gmu, "kgsl_gmu_reg", true);
1099 if (ret)
1100 goto error;
1101
1102 ret = gmu_reg_probe(gmu, "kgsl_gmu_pdc_reg", false);
1103 if (ret)
1104 goto error;
1105
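 /*
  * The GMU-to-GPU register offset below is kept in dword units: the byte
  * distance between the two register bases is shifted right by two
  * (divided by four), assuming 32-bit registers.
  */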
1106 gmu->gmu2gpu_offset = (gmu->reg_phys - device->reg_phys) >> 2;
1107
1108 /* Initialize HFI and GMU interrupts */
1109 ret = gmu_irq_probe(gmu);
1110 if (ret)
1111 goto error;
1112
1113 /* Don't enable GMU interrupts until GMU started */
1114 /* We cannot use gmu_irq_disable because it writes registers */
1115 disable_irq(gmu->gmu_interrupt_num);
1116 disable_irq(hfi->hfi_interrupt_num);
1117
1118 tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long)gmu);
1119 INIT_LIST_HEAD(&hfi->msglist);
1120 spin_lock_init(&hfi->msglock);
1121
1122 /* Retrieve GMU/GPU power level configurations */
1123 ret = gmu_pwrlevel_probe(gmu, node);
1124 if (ret)
1125 goto error;
1126
1127 gmu->num_gpupwrlevels = pwr->num_pwrlevels;
1128
1129 for (i = 0; i < gmu->num_gpupwrlevels; i++) {
1130 int j = gmu->num_gpupwrlevels - 1 - i;
1131
1132 gmu->gpu_freqs[i] = pwr->pwrlevels[j].gpu_freq;
1133 }
1134
1135 /* Initializes GPU b/w levels configuration */
1136 ret = gmu_gpu_bw_probe(gmu);
1137 if (ret)
1138 goto error;
1139
1140 /* Initialize GMU CNOC b/w levels configuration */
1141 ret = gmu_cnoc_bw_probe(gmu);
1142 if (ret)
1143 goto error;
1144
1145 /* Populates RPMh configurations */
1146 ret = gmu_rpmh_init(gmu, pwr);
1147 if (ret)
1148 goto error;
1149
1150 hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);
1151
1152 /* Set up GMU idle states */
1153 if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT))
1154 gmu->idle_level = GPU_HW_MIN_VOLT;
1155 else if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP))
1156 gmu->idle_level = GPU_HW_NAP;
1157 else if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
1158 gmu->idle_level = GPU_HW_IFPC;
1159 else if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
1160 gmu->idle_level = GPU_HW_SPTP_PC;
1161 else
1162 gmu->idle_level = GPU_HW_ACTIVE;
1163
1164 return 0;
1165
1166error:
1167 gmu_remove(device);
1168 return ret;
1169}
1170
1171
1172
1173static int gmu_enable_clks(struct gmu_device *gmu)
1174{
1175 int ret, j = 0;
1176
1177 if (IS_ERR_OR_NULL(gmu->clks[0]))
1178 return -EINVAL;
1179
1180 ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
1181 if (ret) {
1182 dev_err(&gmu->pdev->dev, "fail to set default GMU clk freq %d\n",
1183 gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
1184 return ret;
1185 }
1186
1187 while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
1188 ret = clk_prepare_enable(gmu->clks[j]);
1189 if (ret) {
1190 dev_err(&gmu->pdev->dev,
1191 "fail to enable gpucc clk idx %d\n",
1192 j);
1193 return ret;
1194 }
1195 j++;
1196 }
1197
1198 set_bit(GMU_CLK_ON, &gmu->flags);
1199 return 0;
1200}
1201
1202static int gmu_disable_clks(struct gmu_device *gmu)
1203{
1204 int ret, j = 0;
1205
1206 if (IS_ERR_OR_NULL(gmu->clks[0]))
1207 return 0;
1208
1209 ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[0]);
1210 if (ret) {
1211 dev_err(&gmu->pdev->dev, "fail to reset GMU clk freq %d\n",
1212 gmu->gmu_freqs[0]);
1213 return ret;
1214 }
1215
1216 while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
1217 clk_disable_unprepare(gmu->clks[j]);
1218 j++;
1219 }
1220
1221 clear_bit(GMU_CLK_ON, &gmu->flags);
1222 return 0;
1223
1224}
1225
1226static int gmu_enable_gdsc(struct gmu_device *gmu)
1227{
1228 int ret;
1229
1230 if (IS_ERR_OR_NULL(gmu->cx_gdsc))
1231 return 0;
1232
1233 ret = regulator_enable(gmu->cx_gdsc);
1234 if (ret)
1235 dev_err(&gmu->pdev->dev,
1236 "Failed to enable GMU CX gdsc, error %d\n", ret);
1237
1238 return ret;
1239}
1240
1241#define CX_GDSC_TIMEOUT 10 /* ms */
1242static int gmu_disable_gdsc(struct gmu_device *gmu)
1243{
1244 int ret;
1245 unsigned long t;
1246
1247 if (IS_ERR_OR_NULL(gmu->cx_gdsc))
1248 return 0;
1249
1250 ret = regulator_disable(gmu->cx_gdsc);
1251 if (ret) {
1252 dev_err(&gmu->pdev->dev,
1253 "Failed to disable GMU CX gdsc, error %d\n", ret);
1254 return ret;
1255 }
1256
1257 /*
1258 * After the GX GDSC is off, the CX GDSC must also go off.
1259 * Voting it off from the GPU driver alone cannot
1260 * guarantee that, so poll with a 10ms timeout to
1261 * make sure the CX GDSC is really off.
1262 */
1263 t = jiffies + msecs_to_jiffies(CX_GDSC_TIMEOUT);
1264 do {
1265 if (!regulator_is_enabled(gmu->cx_gdsc))
1266 return 0;
1267 udelay(100);
1268
1269 } while (!(time_after(jiffies, t)));
1270
1271 dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout\n");
1272 return -ETIMEDOUT;
1273}
1274
1275static int gmu_fast_boot(struct kgsl_device *device)
1276{
1277 int ret;
1278 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1279 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1280 struct gmu_device *gmu = &device->gmu;
1281
1282 hfi_stop(gmu);
1283 clear_bit(GMU_HFI_ON, &gmu->flags);
1284
1285 ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
1286 GMU_RESET, 0);
1287 if (ret)
1288 return ret;
1289
1290 /*FIXME: enabling WD interrupt*/
1291
1292 ret = hfi_start(gmu, GMU_WARM_BOOT);
1293 if (ret)
1294 return ret;
1295
1296 ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
1297 OOB_CPINIT_CHECK_MASK, OOB_CPINIT_CLEAR_MASK);
1298
1299 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
1300 gpudev->oob_clear(adreno_dev,
1301 OOB_BOOT_SLUMBER_CLEAR_MASK);
1302
1303 return ret;
1304}
1305
1306static int gmu_suspend(struct kgsl_device *device)
1307{
1308 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1309 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1310 struct gmu_device *gmu = &device->gmu;
1311
1312 if (!test_bit(GMU_CLK_ON, &gmu->flags))
1313 return 0;
1314
1315 /* Pending messages in all queues are abandoned */
1316 hfi_stop(gmu);
1317 clear_bit(GMU_HFI_ON, &gmu->flags);
1318 gmu_irq_disable(device);
1319
1320 if (gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_SUSPEND, 0, 0))
1321 return -EINVAL;
1322
1323 gmu_disable_clks(gmu);
1324 gmu_disable_gdsc(gmu);
1325 return 0;
1326}
1327
1328/* To be called to power on both GPU and GMU */
1329int gmu_start(struct kgsl_device *device)
1330{
1331 int ret = 0, perf_idx;
1332 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1333 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1334 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
1335 struct gmu_device *gmu = &device->gmu;
1336 int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
1337
1338 switch (device->state) {
1339 case KGSL_STATE_INIT:
1340 case KGSL_STATE_SUSPEND:
1341 WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
1342 gmu_enable_gdsc(gmu);
1343 gmu_enable_clks(gmu);
1344
1345 /* Convert to RPMh frequency index */
1346 perf_idx = gmu->num_gpupwrlevels -
1347 pwr->default_pwrlevel - 1;
1348
1349 /* Vote for 300MHz DDR for GMU to init */
1350 ret = msm_bus_scale_client_update_request(gmu->pcl,
1351 bus_level);
1352 if (ret) {
1353 dev_err(&gmu->pdev->dev,
1354 "Failed to allocate gmu b/w\n");
1355 goto error_clks;
1356 }
1357
1358 ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
1359 GMU_COLD_BOOT, 0);
1360 if (ret)
1361 goto error_bus;
1362
1363 gmu_irq_enable(device);
1364
1365 ret = hfi_start(gmu, GMU_COLD_BOOT);
1366 if (ret)
1367 goto error_gpu;
1368
1369 /* Send default DCVS level */
1370 ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
1371 if (ret)
1372 goto error_gpu;
1373
1374 msm_bus_scale_client_update_request(gmu->pcl, 0);
1375 break;
1376
1377 case KGSL_STATE_SLUMBER:
1378 WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
1379 gmu_enable_gdsc(gmu);
1380 gmu_enable_clks(gmu);
1381
1382 perf_idx = gmu->num_gpupwrlevels - gmu->wakeup_pwrlevel - 1;
1383
1384 ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
1385 GMU_WARM_BOOT, 0);
1386 if (ret)
1387 goto error_clks;
1388
1389 gmu_irq_enable(device);
1390
1391 ret = hfi_start(gmu, GMU_WARM_BOOT);
1392 if (ret)
1393 goto error_gpu;
1394
1395 if (gmu->wakeup_pwrlevel != pwr->default_pwrlevel) {
1396 ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
1397 if (ret)
1398 goto error_gpu;
1399 gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
1400 }
1401 break;
1402
1403 case KGSL_STATE_RESET:
1404 if (test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv)) {
1405 gmu_suspend(device);
1406 gmu_enable_gdsc(gmu);
1407 gmu_enable_clks(gmu);
1408
1409 perf_idx = gmu->num_gpupwrlevels -
1410 pwr->active_pwrlevel - 1;
1411
1412 bus_level =
1413 pwr->pwrlevels[pwr->active_pwrlevel].bus_freq;
1414 ret = gpudev->rpmh_gpu_pwrctrl(
1415 adreno_dev, GMU_FW_START, GMU_RESET, 0);
1416 if (ret)
1417 goto error_clks;
1418
1419 gmu_irq_enable(device);
1420
1421 ret = hfi_start(gmu, GMU_WARM_BOOT);
1422 if (ret)
1423 goto error_gpu;
1424
1425 /* Send DCVS level prior to reset*/
1426 ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
1427 if (ret)
1428 goto error_gpu;
1429
1430 ret = gpudev->oob_set(adreno_dev,
1431 OOB_CPINIT_SET_MASK,
1432 OOB_CPINIT_CHECK_MASK,
1433 OOB_CPINIT_CLEAR_MASK);
1434
1435 } else {
1436 gmu_fast_boot(device);
1437 }
1438 break;
1439 default:
1440 break;
1441 }
1442
1443 /*
1444 * OOB to enable power management of GMU.
1445 * In v2, this function call shall move ahead
1446 * of hfi_start() to save power.
1447 */
1448 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
1449 gpudev->oob_clear(adreno_dev,
1450 OOB_BOOT_SLUMBER_CLEAR_MASK);
1451
1452 return ret;
1453
1454error_gpu:
1455 hfi_stop(gmu);
1456 gmu_irq_disable(device);
1457 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
1458 gpudev->oob_clear(adreno_dev,
1459 OOB_BOOT_SLUMBER_CLEAR_MASK);
1460 gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
1461error_bus:
1462 msm_bus_scale_client_update_request(gmu->pcl, 0);
1463error_clks:
1464 gmu_disable_clks(gmu);
1465 gmu_disable_gdsc(gmu);
1466 return ret;
1467}
1468
1469#define GMU_IDLE_TIMEOUT 10 /* ms */
1470
1471/* Caller shall ensure GPU is ready for SLUMBER */
1472void gmu_stop(struct kgsl_device *device)
1473{
1474 struct gmu_device *gmu = &device->gmu;
1475 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1476 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1477 unsigned long t;
1478 bool idle = false;
1479 unsigned int reg;
1480
1481 if (!test_bit(GMU_CLK_ON, &gmu->flags))
1482 return;
1483
1484 t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
1485 while (!time_after(jiffies, t)) {
1486 adreno_read_gmureg(ADRENO_DEVICE(device),
1487 ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
1488 if (reg == device->gmu.idle_level) {
1489 idle = true;
1490 break;
1491 }
1492 cpu_relax();
1493 }
1494
1495 gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
1496
1497 if (!idle || (gpudev->wait_for_gmu_idle &&
1498 gpudev->wait_for_gmu_idle(adreno_dev))) {
1499 dev_err(&gmu->pdev->dev, "Stopping GMU before it is idle\n");
1500 }
1501
1502 /* Pending messages in all queues are abandoned */
1503 hfi_stop(gmu);
1504 clear_bit(GMU_HFI_ON, &gmu->flags);
1505 gmu_irq_disable(device);
1506
1507 gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
1508 gmu_disable_clks(gmu);
1509 gmu_disable_gdsc(gmu);
1510
1511 /* TODO: Vote CX, MX retention off */
1512
1513 msm_bus_scale_client_update_request(gmu->pcl, 0);
1514}
1515
1516void gmu_remove(struct kgsl_device *device)
1517{
1518 struct gmu_device *gmu = &device->gmu;
1519 struct kgsl_hfi *hfi = &gmu->hfi;
1520 int i = 0;
1521
1522 if (!device->gmu.pdev)
1523 return;
1524
1525 tasklet_kill(&hfi->tasklet);
1526
1527 gmu_stop(device);
1528 gmu_irq_disable(device);
1529
1530 while ((i < MAX_GMU_CLKS) && gmu->clks[i]) {
1531 gmu->clks[i] = NULL;
1532 i++;
1533 }
1534
1535 if (gmu->gmu_interrupt_num) {
1536 devm_free_irq(&gmu->pdev->dev,
1537 gmu->gmu_interrupt_num, gmu);
1538 gmu->gmu_interrupt_num = 0;
1539 }
1540
1541 if (hfi->hfi_interrupt_num) {
1542 devm_free_irq(&gmu->pdev->dev,
1543 hfi->hfi_interrupt_num, hfi);
1544 hfi->hfi_interrupt_num = 0;
1545 }
1546
1547 if (gmu->ccl) {
1548 msm_bus_scale_unregister_client(gmu->ccl);
1549 gmu->ccl = 0;
1550 }
1551
1552 if (gmu->pcl) {
1553 msm_bus_scale_unregister_client(gmu->pcl);
1554 gmu->pcl = 0;
1555 }
1556
1557 if (gmu->pdc_reg_virt) {
1558 devm_iounmap(&gmu->pdev->dev, gmu->pdc_reg_virt);
1559 gmu->pdc_reg_virt = NULL;
1560 }
1561
1562 if (gmu->reg_virt) {
1563 devm_iounmap(&gmu->pdev->dev, gmu->reg_virt);
1564 gmu->reg_virt = NULL;
1565 }
1566
1567 if (gmu->hfi_mem || gmu->dump_mem)
1568 gmu_memory_close(&device->gmu);
1569
1570 for (i = 0; i < MAX_GMU_CLKS; i++) {
1571 if (gmu->clks[i]) {
1572 devm_clk_put(&gmu->pdev->dev, gmu->clks[i]);
1573 gmu->clks[i] = NULL;
1574 }
1575 }
1576
1577 if (gmu->gx_gdsc) {
1578 devm_regulator_put(gmu->gx_gdsc);
1579 gmu->gx_gdsc = NULL;
1580 }
1581
1582 if (gmu->cx_gdsc) {
1583 devm_regulator_put(gmu->cx_gdsc);
1584 gmu->cx_gdsc = NULL;
1585 }
1586
1587 device->gmu.pdev = NULL;
1588}