/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/types.h>
14#include <linux/device.h>
15#include <linux/iommu.h>
16#include <linux/of_platform.h>
17#include <linux/msm-bus.h>
18#include <linux/msm-bus-board.h>
19#include <linux/pm_opp.h>
20#include <soc/qcom/cmd-db.h>
21
22#include "kgsl_device.h"
23#include "kgsl_gmu.h"
24#include "kgsl_hfi.h"
25#include "a6xx_reg.h"
26#include "adreno.h"
27
28#define GMU_CONTEXT_USER 0
29#define GMU_CONTEXT_KERNEL 1
30#define GMU_KERNEL_ENTRIES 8
31
32enum gmu_iommu_mem_type {
33 GMU_CACHED_CODE,
34 GMU_CACHED_DATA,
35 GMU_NONCACHED_KERNEL,
36 GMU_NONCACHED_USER
37};
38
39/*
40 * GMU virtual memory mapping definitions
41 */
42struct gmu_vma {
43 unsigned int noncached_ustart;
44 unsigned int noncached_usize;
45 unsigned int noncached_kstart;
46 unsigned int noncached_ksize;
47 unsigned int cached_dstart;
48 unsigned int cached_dsize;
49 unsigned int cached_cstart;
50 unsigned int cached_csize;
51 unsigned int image_start;
52};
53
static void gmu_snapshot(struct kgsl_device *device);

struct gmu_iommu_context {
57 const char *name;
58 struct device *dev;
59 struct iommu_domain *domain;
60};
61
62#define HFIMEM_SIZE SZ_16K
63
64#define DUMPMEM_SIZE SZ_16K
65
66/* Define target specific GMU VMA configurations */
67static const struct gmu_vma vma = {
68 /* Noncached user segment */
69 0x80000000, SZ_1G,
70 /* Noncached kernel segment */
71 0x60000000, SZ_512M,
72 /* Cached data segment */
73 0x44000, (SZ_256K-SZ_16K),
74 /* Cached code segment */
75 0x0, (SZ_256K-SZ_16K),
76 /* FW image */
77 0x0,
78};
79
80struct gmu_iommu_context gmu_ctx[] = {
81 [GMU_CONTEXT_USER] = { .name = "gmu_user" },
82 [GMU_CONTEXT_KERNEL] = { .name = "gmu_kernel" }
83};
84
/*
 * There are a few static memory buffers that are allocated and mapped at boot
 * time for the GMU to function. The buffers are permanent (not freed) after
 * GPU boot. The sizes of the buffers are constant and not expected to change.
 *
 * We define an array and a simple allocator to keep track of the currently
 * active SMMU entries of the GMU kernel mode context. Each entry is assigned
 * a unique address inside the GMU kernel mode address range. The addresses
 * are assigned sequentially and aligned to 1MB each.
 */
96static struct gmu_memdesc gmu_kmem_entries[GMU_KERNEL_ENTRIES];
97static unsigned long gmu_kmem_bitmap;
98
Kyle Piefer11a48b62017-03-17 14:53:40 -070099/*
100 * kgsl_gmu_isenabled() - Check if there is a GMU and it is enabled
101 * @device: Pointer to the KGSL device that owns the GMU
102 *
103 * Check if a GMU has been found and successfully probed. Also
104 * check that the feature flag to use a GMU is enabled. Returns
105 * true if both of these conditions are met, otherwise false.
106 */
107bool kgsl_gmu_isenabled(struct kgsl_device *device)
108{
109 struct gmu_device *gmu = &device->gmu;
110 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
111
112 if (gmu->pdev && ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
113 return true;
114
115 return false;
116}
117
Kyle Pieferb1027b02017-02-10 13:58:58 -0800118static int _gmu_iommu_fault_handler(struct device *dev,
119 unsigned long addr, int flags, const char *name)
120{
121 char *fault_type = "unknown";
122
123 if (flags & IOMMU_FAULT_TRANSLATION)
124 fault_type = "translation";
125 else if (flags & IOMMU_FAULT_PERMISSION)
126 fault_type = "permission";
127
128 dev_err(dev, "GMU fault addr = %lX, context=%s (%s %s fault)\n",
129 addr, name,
130 (flags & IOMMU_FAULT_WRITE) ? "write" : "read",
131 fault_type);
132
133 return 0;
134}
135
136static int gmu_kernel_fault_handler(struct iommu_domain *domain,
137 struct device *dev, unsigned long addr, int flags, void *token)
138{
139 return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_kernel");
140}
141
142static int gmu_user_fault_handler(struct iommu_domain *domain,
143 struct device *dev, unsigned long addr, int flags, void *token)
144{
145 return _gmu_iommu_fault_handler(dev, addr, flags, "gmu_user");
146}
147
148static void free_gmu_mem(struct gmu_device *gmu,
149 struct gmu_memdesc *md)
150{
151 /* Free GMU image memory */
152 if (md->hostptr)
153 dma_free_attrs(&gmu->pdev->dev, (size_t) md->size,
154 (void *)md->hostptr, md->physaddr, 0);
155 memset(md, 0, sizeof(*md));
156}
157
158static int alloc_and_map(struct gmu_device *gmu, unsigned int ctx_id,
159 struct gmu_memdesc *md, unsigned int attrs)
160{
161 int ret;
162 struct iommu_domain *domain;
163
164 domain = gmu_ctx[ctx_id].domain;
165
166 md->hostptr = dma_alloc_attrs(&gmu->pdev->dev, (size_t) md->size,
167 &md->physaddr, GFP_KERNEL, 0);
168
169 if (md->hostptr == NULL)
170 return -ENOMEM;
171
172 ret = iommu_map(domain, md->gmuaddr,
173 md->physaddr, md->size,
174 attrs);
175
176 if (ret) {
177 dev_err(&gmu->pdev->dev,
178 "gmu map err: gaddr=0x%016llX, paddr=0x%016llX\n",
179 md->gmuaddr, md->physaddr);
180 free_gmu_mem(gmu, md);
181 }
182
183 return ret;
184}
185
/*
 * allocate_gmu_image() - allocate and map memory for the FW image. The size
 * comes from the loaded firmware file and should be smaller than the code
 * cache size; otherwise the FW may experience performance issues.
 * @gmu: Pointer to GMU device
 * @size: Requested allocation size
 */
193int allocate_gmu_image(struct gmu_device *gmu, unsigned int size)
194{
195 struct gmu_memdesc *md = &gmu->fw_image;
196
197 if (size > vma.cached_csize) {
198 dev_err(&gmu->pdev->dev,
199 "GMU firmware size too big: %d\n", size);
200 return -EINVAL;
201 }
202
203 md->size = size;
204 md->gmuaddr = vma.image_start;
205 md->attr = GMU_CACHED_CODE;
206
207 return alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, IOMMU_READ);
208}
209
210/*
211 * allocate_gmu_kmem() - allocates and maps GMU kernel shared memory
212 * @gmu: Pointer to GMU device
213 * @size: Requested size
214 * @attrs: IOMMU mapping attributes
215 */
216static struct gmu_memdesc *allocate_gmu_kmem(struct gmu_device *gmu,
217 unsigned int size, unsigned int attrs)
218{
219 struct gmu_memdesc *md;
220 int ret, entry_idx = find_first_zero_bit(
221 &gmu_kmem_bitmap, GMU_KERNEL_ENTRIES);
222
223 size = PAGE_ALIGN(size);
224
225 if (size > SZ_1M || size == 0) {
226 dev_err(&gmu->pdev->dev,
227 "Requested %d bytes of GMU kernel memory, max=1MB\n",
228 size);
229 return ERR_PTR(-EINVAL);
230 }
231
232 if (entry_idx >= GMU_KERNEL_ENTRIES) {
233 dev_err(&gmu->pdev->dev,
234 "Ran out of GMU kernel mempool slots\n");
235 return ERR_PTR(-EINVAL);
236 }
237
238 /* Allocate GMU virtual memory */
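	/*
	 * Each mempool slot owns a fixed 1MB window in the noncached kernel
	 * VA range, so the slot index alone determines the GMU address.
	 */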
239 md = &gmu_kmem_entries[entry_idx];
240 md->gmuaddr = vma.noncached_kstart + (entry_idx * SZ_1M);
241 set_bit(entry_idx, &gmu_kmem_bitmap);
242 md->attr = GMU_NONCACHED_KERNEL;
243 md->size = size;
244
245 ret = alloc_and_map(gmu, GMU_CONTEXT_KERNEL, md, attrs);
246
247 if (ret) {
248 clear_bit(entry_idx, &gmu_kmem_bitmap);
249 md->gmuaddr = 0;
250 return ERR_PTR(ret);
251 }
252
253 return md;
254}
255
256static int gmu_iommu_cb_probe(struct gmu_device *gmu,
257 struct gmu_iommu_context *ctx,
258 struct device_node *node)
259{
260 struct platform_device *pdev = of_find_device_by_node(node);
261 struct device *dev;
262 int ret;
263
264 dev = &pdev->dev;
265
266 ctx->dev = dev;
267 ctx->domain = iommu_domain_alloc(&platform_bus_type);
268 if (ctx->domain == NULL) {
269 dev_err(&gmu->pdev->dev, "gmu iommu fail to alloc %s domain\n",
270 ctx->name);
271 return -ENODEV;
272 }
273
274 ret = iommu_attach_device(ctx->domain, dev);
275 if (ret) {
276 dev_err(&gmu->pdev->dev, "gmu iommu fail to attach %s device\n",
277 ctx->name);
278 iommu_domain_free(ctx->domain);
279 }
280
281 return ret;
282}
283
284static struct {
285 const char *compatible;
286 int index;
287 iommu_fault_handler_t hdlr;
288} cbs[] = {
289 { "qcom,smmu-gmu-user-cb",
290 GMU_CONTEXT_USER,
291 gmu_user_fault_handler,
292 },
293 { "qcom,smmu-gmu-kernel-cb",
294 GMU_CONTEXT_KERNEL,
295 gmu_kernel_fault_handler,
296 },
297};
298
299/*
300 * gmu_iommu_init() - probe IOMMU context banks used by GMU
301 * and attach GMU device
302 * @gmu: Pointer to GMU device
303 * @node: Pointer to GMU device node
304 */
305int gmu_iommu_init(struct gmu_device *gmu, struct device_node *node)
306{
307 struct device_node *child;
308 struct gmu_iommu_context *ctx = NULL;
309 int ret, i;
310
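	/*
	 * Populate the child platform devices for the context bank nodes so
	 * that of_find_device_by_node() can resolve them in
	 * gmu_iommu_cb_probe() below.
	 */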
311 of_platform_populate(node, NULL, NULL, &gmu->pdev->dev);
312
313 for (i = 0; i < ARRAY_SIZE(cbs); i++) {
314 child = of_find_compatible_node(node, NULL, cbs[i].compatible);
315 if (child) {
316 ctx = &gmu_ctx[cbs[i].index];
317 ret = gmu_iommu_cb_probe(gmu, ctx, child);
318 if (ret)
319 return ret;
320 iommu_set_fault_handler(ctx->domain,
321 cbs[i].hdlr, ctx);
322 }
323 }
324
325 for (i = 0; i < ARRAY_SIZE(gmu_ctx); i++) {
326 if (gmu_ctx[i].domain == NULL) {
327 dev_err(&gmu->pdev->dev,
328 "Missing GMU %s context bank node\n",
329 gmu_ctx[i].name);
330 return -EINVAL;
331 }
332 }
333
334 return 0;
335}
336
337/*
338 * gmu_kmem_close() - free all kernel memory allocated for GMU and detach GMU
339 * from IOMMU context banks.
340 * @gmu: Pointer to GMU device
341 */
342void gmu_kmem_close(struct gmu_device *gmu)
343{
344 int i;
345 struct gmu_memdesc *md = &gmu->fw_image;
346 struct gmu_iommu_context *ctx = &gmu_ctx[GMU_CONTEXT_KERNEL];
347
348 /* Free GMU image memory */
349 free_gmu_mem(gmu, md);
350
351 /* Unmap image memory */
352 iommu_unmap(ctx->domain,
353 gmu->fw_image.gmuaddr,
354 gmu->fw_image.size);
355
356
357 gmu->hfi_mem = NULL;
358 gmu->dump_mem = NULL;
359
360 /* Unmap all memories in GMU kernel memory pool */
361 for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
362 struct gmu_memdesc *memptr = &gmu_kmem_entries[i];
363
364 if (memptr->gmuaddr)
365 iommu_unmap(ctx->domain, memptr->gmuaddr, memptr->size);
366 }
367
368 /* Free GMU shared kernel memory */
369 for (i = 0; i < GMU_KERNEL_ENTRIES; i++) {
370 md = &gmu_kmem_entries[i];
371 free_gmu_mem(gmu, md);
372 clear_bit(i, &gmu_kmem_bitmap);
373 }
374
375 /* Detach the device from SMMU context bank */
376 iommu_detach_device(ctx->domain, ctx->dev);
377
378 /* free kernel mem context */
379 iommu_domain_free(ctx->domain);
380}
381
382void gmu_memory_close(struct gmu_device *gmu)
383{
384 gmu_kmem_close(gmu);
385 /* Free user memory context */
386 iommu_domain_free(gmu_ctx[GMU_CONTEXT_USER].domain);
387
388}
389
390/*
391 * gmu_memory_probe() - probe GMU IOMMU context banks and allocate memory
392 * to share with GMU in kernel mode.
393 * @gmu: Pointer to GMU device
394 * @node: Pointer to GMU device node
395 */
396int gmu_memory_probe(struct gmu_device *gmu, struct device_node *node)
397{
398 int ret;
399
400 ret = gmu_iommu_init(gmu, node);
401 if (ret)
402 return ret;
403
404 /* Allocates & maps memory for HFI */
405 gmu->hfi_mem = allocate_gmu_kmem(gmu, HFIMEM_SIZE,
406 (IOMMU_READ | IOMMU_WRITE));
407 if (IS_ERR(gmu->hfi_mem)) {
408 ret = PTR_ERR(gmu->hfi_mem);
409 goto err_ret;
410 }
411
412 /* Allocates & maps GMU crash dump memory */
413 gmu->dump_mem = allocate_gmu_kmem(gmu, DUMPMEM_SIZE,
414 (IOMMU_READ | IOMMU_WRITE));
415 if (IS_ERR(gmu->dump_mem)) {
416 ret = PTR_ERR(gmu->dump_mem);
417 goto err_ret;
418 }
419
420 return 0;
421err_ret:
422 gmu_memory_close(gmu);
423 return ret;
424}
425
/*
 * gmu_dcvs_set() - request GMU to change GPU frequency and/or bandwidth.
 * @gmu: Pointer to GMU device
 * @gpu_pwrlevel: index into the GPU DCVS table used by KGSL
 * @bus_level: index into the GPU bus table used by KGSL
 *
 * The function converts the GPU power level and bus level indices used by
 * KGSL into the indices used by the GMU/RPMh.
 */
435int gmu_dcvs_set(struct gmu_device *gmu,
436 unsigned int gpu_pwrlevel, unsigned int bus_level)
437{
438 struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
439 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
440 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
441 int perf_idx = INVALID_DCVS_IDX, bw_idx = INVALID_DCVS_IDX;
George Shena2f7b432017-08-18 12:58:18 -0700442 int ret;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800443
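	/*
	 * The GMU DCVS table built in gmu_probe() is indexed in the reverse
	 * order of the KGSL power level table, so mirror the index before
	 * sending the vote.
	 */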
George Shena2f7b432017-08-18 12:58:18 -0700444 if (gpu_pwrlevel < gmu->num_gpupwrlevels - 1)
Kyle Pieferb1027b02017-02-10 13:58:58 -0800445 perf_idx = gmu->num_gpupwrlevels - gpu_pwrlevel - 1;
446
George Shena2f7b432017-08-18 12:58:18 -0700447 if (bus_level < gmu->num_bwlevels && bus_level > 0)
Kyle Pieferb1027b02017-02-10 13:58:58 -0800448 bw_idx = bus_level;
449
450 if ((perf_idx == INVALID_DCVS_IDX) &&
451 (bw_idx == INVALID_DCVS_IDX))
452 return -EINVAL;
453
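	/*
	 * With the HFI_USE_REG quirk the vote is issued through the register
	 * based GMU_DCVS_NOHFI path; otherwise it is sent over the HFI queue.
	 */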
George Shena2f7b432017-08-18 12:58:18 -0700454 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
455 ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev,
Kyle Pieferb1027b02017-02-10 13:58:58 -0800456 GMU_DCVS_NOHFI, perf_idx, bw_idx);
457
George Shena2f7b432017-08-18 12:58:18 -0700458 if (ret) {
459 dev_err(&gmu->pdev->dev,
460 "Failed to set GPU perf idx %d, bw idx %d\n",
461 perf_idx, bw_idx);
462
463 gmu_snapshot(device);
464 }
465
466 return ret;
467 }
468
Kyle Pieferb1027b02017-02-10 13:58:58 -0800469 return hfi_send_dcvs_vote(gmu, perf_idx, bw_idx, ACK_NONBLOCK);
470}
471
472struct rpmh_arc_vals {
473 unsigned int num;
474 uint16_t val[MAX_GX_LEVELS];
475};
476
477static const char gfx_res_id[] = "gfx.lvl";
478static const char cx_res_id[] = "cx.lvl";
479static const char mx_res_id[] = "mx.lvl";
480
481enum rpmh_vote_type {
482 GPU_ARC_VOTE = 0,
483 GMU_ARC_VOTE,
484 INVALID_ARC_VOTE,
485};
486
487static const char debug_strs[][8] = {
488 [GPU_ARC_VOTE] = "gpu",
489 [GMU_ARC_VOTE] = "gmu",
490};
491
/*
 * rpmh_arc_cmds() - query the RPMh command database for the GX/CX/MX rail
 * VLVL tables. The table index is later used by the GMU to vote for rail
 * voltage.
 *
 * @gmu: Pointer to GMU device
 * @arc: Pointer to RPMh rail controller (ARC) voltage table
 * @res_id: Pointer to 8 char array that contains rail name
 */
501static int rpmh_arc_cmds(struct gmu_device *gmu,
502 struct rpmh_arc_vals *arc, const char *res_id)
503{
504 unsigned int len;
505
506 len = cmd_db_get_aux_data_len(res_id);
507
508 if (len > (MAX_GX_LEVELS << 1)) {
509 /* CmdDB VLVL table size in bytes is too large */
510 dev_err(&gmu->pdev->dev,
511 "gfx cmddb size %d larger than alloc buf %d of %s\n",
512 len, (MAX_GX_LEVELS << 1), res_id);
513 return -EINVAL;
514 }
515
516 cmd_db_get_aux_data(res_id, (uint8_t *)arc->val, len);
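	/*
	 * The VLVL entries are expected in ascending order; the first entry
	 * that does not increase over its predecessor marks the end of the
	 * valid table and therefore the number of usable levels.
	 */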
	for (arc->num = 1; arc->num <= MAX_GX_LEVELS; arc->num++) {
		if (arc->num == MAX_GX_LEVELS ||
				arc->val[arc->num - 1] >= arc->val[arc->num])
			break;
	}

523 return 0;
524}
525
/*
 * setup_volt_dependency_tbl() - set up GX->MX or CX->MX rail voltage
 * dependencies. The secondary rail voltage must be equal to or higher than
 * the primary rail voltage. The VLVL table index is used by RPMh for PMIC
 * voltage setting.
 * @votes: Pointer to an ARC vote descriptor
 * @pri_rail: Pointer to primary power rail VLVL table
 * @sec_rail: Pointer to second/dependent power rail VLVL table
 * @vlvl: Pointer to VLVL table being used by GPU or GMU driver, a subset
 *	of pri_rail VLVL table
 * @num_entries: Valid number of entries in table pointed by "vlvl" parameter
 */
538static int setup_volt_dependency_tbl(struct arc_vote_desc *votes,
539 struct rpmh_arc_vals *pri_rail, struct rpmh_arc_vals *sec_rail,
540 unsigned int *vlvl, unsigned int num_entries)
541{
542 int i, j, k;
543 uint16_t cur_vlvl;
George Shen07b4f782017-07-13 10:42:53 -0700544 bool found_match;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800545
546 /* i tracks current KGSL GPU frequency table entry
547 * j tracks second rail voltage table entry
548 * k tracks primary rail voltage table entry
549 */
George Shen07b4f782017-07-13 10:42:53 -0700550 for (i = 0; i < num_entries; i++) {
551 found_match = false;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800552
George Shen07b4f782017-07-13 10:42:53 -0700553 /* Look for a primary rail voltage that matches a VLVL level */
554 for (k = 0; k < pri_rail->num; k++) {
555 if (pri_rail->val[k] == vlvl[i]) {
556 votes[i].pri_idx = k;
557 votes[i].vlvl = vlvl[i];
558 cur_vlvl = vlvl[i];
559 found_match = true;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800560 break;
561 }
562 }
563
George Shen07b4f782017-07-13 10:42:53 -0700564 /* If we did not find a matching VLVL level then abort */
565 if (!found_match)
566 return -EINVAL;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800567
George Shen07b4f782017-07-13 10:42:53 -0700568 /*
569 * Look for a secondary rail index whose VLVL value
570 * is greater than or equal to the VLVL value of the
571 * corresponding index of the primary rail
572 */
573 for (j = 0; j < sec_rail->num; j++) {
574 if (sec_rail->val[j] >= cur_vlvl ||
575 j + 1 == sec_rail->num) {
576 votes[i].sec_idx = j;
577 break;
578 }
579 }
Kyle Pieferb1027b02017-02-10 13:58:58 -0800580 }
581 return 0;
582}
583
/*
 * rpmh_arc_votes_init() - initialize the RPMh votes needed for rail voltage
 * scaling by the GMU.
 * @gmu: Pointer to GMU device
 * @pri_rail: Pointer to primary power rail VLVL table
 * @sec_rail: Pointer to second/dependent power rail VLVL table
 * @type: the type of the primary rail, GPU or GMU
 */
593static int rpmh_arc_votes_init(struct gmu_device *gmu,
594 struct rpmh_arc_vals *pri_rail,
595 struct rpmh_arc_vals *sec_rail,
596 unsigned int type)
597{
George Shen07b4f782017-07-13 10:42:53 -0700598 struct device *dev;
599 struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800600 unsigned int num_freqs;
601 struct arc_vote_desc *votes;
602 unsigned int vlvl_tbl[MAX_GX_LEVELS];
603 unsigned int *freq_tbl;
604 int i, ret;
George Shen07b4f782017-07-13 10:42:53 -0700605 struct dev_pm_opp *opp;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800606
607 if (type == GPU_ARC_VOTE) {
608 num_freqs = gmu->num_gpupwrlevels;
609 votes = gmu->rpmh_votes.gx_votes;
George Shen07b4f782017-07-13 10:42:53 -0700610 freq_tbl = gmu->gpu_freqs;
611 dev = &device->pdev->dev;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800612 } else if (type == GMU_ARC_VOTE) {
613 num_freqs = gmu->num_gmupwrlevels;
614 votes = gmu->rpmh_votes.cx_votes;
George Shen07b4f782017-07-13 10:42:53 -0700615 freq_tbl = gmu->gmu_freqs;
616 dev = &gmu->pdev->dev;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800617 } else {
618 return -EINVAL;
619 }
620
621 if (num_freqs > pri_rail->num) {
622 dev_err(&gmu->pdev->dev,
623 "%s defined more DCVS levels than RPMh can support\n",
624 debug_strs[type]);
625 return -EINVAL;
626 }
627
George Shen07b4f782017-07-13 10:42:53 -0700628 memset(vlvl_tbl, 0, sizeof(vlvl_tbl));
Kyle Pieferb1027b02017-02-10 13:58:58 -0800629 for (i = 0; i < num_freqs; i++) {
George Shen07b4f782017-07-13 10:42:53 -0700630 /* Hardcode VLVL for 0 because it is not registered in OPP */
631 if (freq_tbl[i] == 0) {
632 vlvl_tbl[i] = 0;
633 continue;
634 }
635
636 /* Otherwise get the value from the OPP API */
637 opp = dev_pm_opp_find_freq_exact(dev, freq_tbl[i], true);
638 if (IS_ERR(opp)) {
639 dev_err(&gmu->pdev->dev,
640 "Failed to find opp freq %d of %s\n",
641 freq_tbl[i], debug_strs[type]);
642 return PTR_ERR(opp);
643 }
644
645 /* Values from OPP framework are offset by 1 */
646 vlvl_tbl[i] = dev_pm_opp_get_voltage(opp) - 1;
Kyle Pieferb1027b02017-02-10 13:58:58 -0800647 }
648
649 ret = setup_volt_dependency_tbl(votes,
650 pri_rail, sec_rail, vlvl_tbl, num_freqs);
651
652 if (ret)
653 dev_err(&gmu->pdev->dev, "%s rail volt failed to match DT freqs\n",
654 debug_strs[type]);
655
656 return ret;
657}
658
/*
 * build_rpmh_bw_votes() - build TCS commands to vote for bandwidth.
 * Each command sets the frequency of a node along the path to DDR or CNOC.
 * @rpmh_vote: Pointer to RPMh vote needed by GMU to set BW via RPMh
 * @num_usecases: Number of BW use cases (or BW levels)
 * @handle: Provided by bus driver. It contains TCS command sets for
 *	all BW use cases of a bus client.
 */
667static void build_rpmh_bw_votes(struct gmu_bw_votes *rpmh_vote,
668 unsigned int num_usecases, struct msm_bus_tcs_handle handle)
669{
670 struct msm_bus_tcs_usecase *tmp;
671 int i, j;
672
673 for (i = 0; i < num_usecases; i++) {
674 tmp = &handle.usecases[i];
675 for (j = 0; j < tmp->num_cmds; j++) {
676 if (!i) {
677 /*
				 * The wait bitmask and TCS command addresses
				 * are the same for all bw use cases. To save
				 * data volume exchanged between the driver and
				 * the GMU, transfer them only for the first
				 * bw use case.
683 */
684 rpmh_vote->cmds_per_bw_vote = tmp->num_cmds;
				rpmh_vote->cmds_wait_bitmask =
					tmp->cmds[j].complete ?
						rpmh_vote->cmds_wait_bitmask
						| BIT(j)
						: rpmh_vote->cmds_wait_bitmask
						& (~BIT(j));
691 rpmh_vote->cmd_addrs[j] = tmp->cmds[j].addr;
692 }
693 rpmh_vote->cmd_data[i][j] = tmp->cmds[j].data;
694 }
695 }
696}
697
/*
 * gmu_bus_vote_init - initialize the RPMh votes needed for bw scaling by GMU.
 * @gmu: Pointer to GMU device
 * @pwr: Pointer to KGSL power controller
 */
703static int gmu_bus_vote_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
704{
705 struct msm_bus_tcs_usecase *usecases;
706 struct msm_bus_tcs_handle hdl;
707 struct rpmh_votes_t *votes = &gmu->rpmh_votes;
708 int ret;
709
710 usecases = kcalloc(gmu->num_bwlevels, sizeof(*usecases), GFP_KERNEL);
711 if (!usecases)
712 return -ENOMEM;
713
714 hdl.num_usecases = gmu->num_bwlevels;
715 hdl.usecases = usecases;
716
717 /*
718 * Query TCS command set for each use case defined in GPU b/w table
719 */
	ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->pcl);
	if (ret) {
		kfree(usecases);
		return ret;
	}
723
724 build_rpmh_bw_votes(&votes->ddr_votes, gmu->num_bwlevels, hdl);
725
	/*
	 * Query CNOC TCS command set for each use case defined in cnoc bw table
	 */
	ret = msm_bus_scale_query_tcs_cmd_all(&hdl, gmu->ccl);
	if (ret) {
		kfree(usecases);
		return ret;
	}
732
733 build_rpmh_bw_votes(&votes->cnoc_votes, gmu->num_cnocbwlevels, hdl);
734
735 kfree(usecases);
736
737 return 0;
738}
739
740int gmu_rpmh_init(struct gmu_device *gmu, struct kgsl_pwrctrl *pwr)
741{
742 struct rpmh_arc_vals gfx_arc, cx_arc, mx_arc;
743 int ret;
744
745 /* Populate BW vote table */
746 ret = gmu_bus_vote_init(gmu, pwr);
747 if (ret)
748 return ret;
749
750 /* Populate GPU and GMU frequency vote table */
751 ret = rpmh_arc_cmds(gmu, &gfx_arc, gfx_res_id);
752 if (ret)
753 return ret;
754
755 ret = rpmh_arc_cmds(gmu, &cx_arc, cx_res_id);
756 if (ret)
757 return ret;
758
759 ret = rpmh_arc_cmds(gmu, &mx_arc, mx_res_id);
760 if (ret)
761 return ret;
762
763 ret = rpmh_arc_votes_init(gmu, &gfx_arc, &mx_arc, GPU_ARC_VOTE);
764 if (ret)
765 return ret;
766
767 return rpmh_arc_votes_init(gmu, &cx_arc, &mx_arc, GMU_ARC_VOTE);
768}
769
770static irqreturn_t gmu_irq_handler(int irq, void *data)
771{
772 struct gmu_device *gmu = data;
773 struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
Shrenuj Bansald0fe7462017-05-08 16:11:19 -0700774 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800775 unsigned int status = 0;
776
Kyle Piefere7b06b42017-04-06 13:53:01 -0700777 adreno_read_gmureg(ADRENO_DEVICE(device),
778 ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS, &status);
779 adreno_write_gmureg(ADRENO_DEVICE(device),
780 ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, status);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800781
Kyle Piefere7b06b42017-04-06 13:53:01 -0700782 /* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD WAKEUP interrupts */
Shrenuj Bansald0fe7462017-05-08 16:11:19 -0700783 if (status & GMU_INT_WDOG_BITE) {
Kyle Piefere7b06b42017-04-06 13:53:01 -0700784 dev_err_ratelimited(&gmu->pdev->dev,
785 "GMU watchdog expired interrupt received\n");
Shrenuj Bansald0fe7462017-05-08 16:11:19 -0700786 adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
787 adreno_dispatcher_schedule(device);
788 }
Kyle Piefere7b06b42017-04-06 13:53:01 -0700789 if (status & GMU_INT_HOST_AHB_BUS_ERR)
790 dev_err_ratelimited(&gmu->pdev->dev,
791 "AHB bus error interrupt received\n");
792 if (status & ~GMU_AO_INT_MASK)
793 dev_err_ratelimited(&gmu->pdev->dev,
794 "Unhandled GMU interrupts 0x%lx\n",
795 status & ~GMU_AO_INT_MASK);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800796
Kyle Piefere7b06b42017-04-06 13:53:01 -0700797 return IRQ_HANDLED;
798}
Kyle Pieferb1027b02017-02-10 13:58:58 -0800799
Kyle Piefere7b06b42017-04-06 13:53:01 -0700800static irqreturn_t hfi_irq_handler(int irq, void *data)
801{
802 struct kgsl_hfi *hfi = data;
803 struct gmu_device *gmu = container_of(hfi, struct gmu_device, hfi);
804 struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
Shrenuj Bansald0fe7462017-05-08 16:11:19 -0700805 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
Kyle Piefere7b06b42017-04-06 13:53:01 -0700806 unsigned int status = 0;
807
808 adreno_read_gmureg(ADRENO_DEVICE(device),
809 ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status);
810 adreno_write_gmureg(ADRENO_DEVICE(device),
811 ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status);
812
813 if (status & HFI_IRQ_MSGQ_MASK)
814 tasklet_hi_schedule(&hfi->tasklet);
Shrenuj Bansald0fe7462017-05-08 16:11:19 -0700815 if (status & HFI_IRQ_CM3_FAULT_MASK) {
Kyle Piefere7b06b42017-04-06 13:53:01 -0700816 dev_err_ratelimited(&gmu->pdev->dev,
817 "GMU CM3 fault interrupt received\n");
Shrenuj Bansald0fe7462017-05-08 16:11:19 -0700818 adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
819 adreno_dispatcher_schedule(device);
820 }
Kyle Piefere7b06b42017-04-06 13:53:01 -0700821 if (status & ~HFI_IRQ_MASK)
822 dev_err_ratelimited(&gmu->pdev->dev,
823 "Unhandled HFI interrupts 0x%lx\n",
824 status & ~HFI_IRQ_MASK);
Kyle Pieferb1027b02017-02-10 13:58:58 -0800825
826 return IRQ_HANDLED;
827}
828
829static int gmu_pwrlevel_probe(struct gmu_device *gmu, struct device_node *node)
830{
831 struct device_node *pwrlevel_node, *child;
832
833 pwrlevel_node = of_find_node_by_name(node, "qcom,gmu-pwrlevels");
834
835 if (pwrlevel_node == NULL) {
836 dev_err(&gmu->pdev->dev, "Unable to find 'qcom,gmu-pwrlevels'\n");
837 return -EINVAL;
838 }
839
840 gmu->num_gmupwrlevels = 0;
841
842 for_each_child_of_node(pwrlevel_node, child) {
843 unsigned int index;
844
845 if (of_property_read_u32(child, "reg", &index))
846 return -EINVAL;
847
848 if (index >= MAX_CX_LEVELS) {
849 dev_err(&gmu->pdev->dev, "gmu pwrlevel %d is out of range\n",
850 index);
851 continue;
852 }
853
854 if (index >= gmu->num_gmupwrlevels)
855 gmu->num_gmupwrlevels = index + 1;
856
857 if (of_property_read_u32(child, "qcom,gmu-freq",
858 &gmu->gmu_freqs[index]))
859 return -EINVAL;
860 }
861
862 return 0;
863}
864
George Shencc7e1092017-08-30 10:45:52 -0700865static int gmu_reg_probe(struct gmu_device *gmu, const char *name, bool is_gmu)
Kyle Pieferb1027b02017-02-10 13:58:58 -0800866{
867 struct resource *res;
868
869 res = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM, name);
870 if (res == NULL) {
871 dev_err(&gmu->pdev->dev,
872 "platform_get_resource %s failed\n", name);
873 return -EINVAL;
874 }
875
876 if (res->start == 0 || resource_size(res) == 0) {
877 dev_err(&gmu->pdev->dev,
878 "dev %d %s invalid register region\n",
879 gmu->pdev->dev.id, name);
880 return -EINVAL;
881 }
882
George Shencc7e1092017-08-30 10:45:52 -0700883 if (is_gmu) {
Kyle Pieferb1027b02017-02-10 13:58:58 -0800884 gmu->reg_phys = res->start;
885 gmu->reg_len = resource_size(res);
886 gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
887 resource_size(res));
888
889 if (gmu->reg_virt == NULL) {
890 dev_err(&gmu->pdev->dev, "GMU regs ioremap failed\n");
891 return -ENODEV;
892 }
893
George Shencc7e1092017-08-30 10:45:52 -0700894 } else {
Kyle Pieferb1027b02017-02-10 13:58:58 -0800895 gmu->pdc_reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
896 resource_size(res));
897 if (gmu->pdc_reg_virt == NULL) {
898 dev_err(&gmu->pdev->dev, "PDC regs ioremap failed\n");
899 return -ENODEV;
900 }
901 }
902
903 return 0;
904}
905
906static int gmu_clocks_probe(struct gmu_device *gmu, struct device_node *node)
907{
908 const char *cname;
909 struct property *prop;
910 struct clk *c;
911 int i = 0;
912
913 of_property_for_each_string(node, "clock-names", prop, cname) {
914 c = devm_clk_get(&gmu->pdev->dev, cname);
915
916 if (IS_ERR(c)) {
917 dev_err(&gmu->pdev->dev,
918 "dt: Couldn't get GMU clock: %s\n", cname);
919 return PTR_ERR(c);
920 }
921
922 if (i >= MAX_GMU_CLKS) {
923 dev_err(&gmu->pdev->dev,
924 "dt: too many GMU clocks defined\n");
925 return -EINVAL;
926 }
927
928 gmu->clks[i++] = c;
929 }
930
931 return 0;
932}
933
934static int gmu_gpu_bw_probe(struct gmu_device *gmu)
935{
936 struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
937 struct msm_bus_scale_pdata *bus_scale_table;
938
939 bus_scale_table = msm_bus_cl_get_pdata(device->pdev);
940 if (bus_scale_table == NULL) {
941 dev_err(&gmu->pdev->dev, "dt: cannot get bus table\n");
942 return -ENODEV;
943 }
944
945 gmu->num_bwlevels = bus_scale_table->num_usecases;
946 gmu->pcl = msm_bus_scale_register_client(bus_scale_table);
947 if (!gmu->pcl) {
948 dev_err(&gmu->pdev->dev, "dt: cannot register bus client\n");
949 return -ENODEV;
950 }
951
952 return 0;
953}
954
955static int gmu_cnoc_bw_probe(struct gmu_device *gmu)
956{
957 struct msm_bus_scale_pdata *cnoc_table;
958
959 cnoc_table = msm_bus_cl_get_pdata(gmu->pdev);
960 if (cnoc_table == NULL) {
961 dev_err(&gmu->pdev->dev, "dt: cannot get cnoc table\n");
962 return -ENODEV;
963 }
964
965 gmu->num_cnocbwlevels = cnoc_table->num_usecases;
966 gmu->ccl = msm_bus_scale_register_client(cnoc_table);
967 if (!gmu->ccl) {
968 dev_err(&gmu->pdev->dev, "dt: cannot register cnoc client\n");
969 return -ENODEV;
970 }
971
972 return 0;
973}
974
975static int gmu_regulators_probe(struct gmu_device *gmu,
976 struct device_node *node)
977{
978 const char *name;
979 struct property *prop;
980 struct device *dev = &gmu->pdev->dev;
981 int ret = 0;
982
983 of_property_for_each_string(node, "regulator-names", prop, name) {
984 if (!strcmp(name, "vddcx")) {
985 gmu->cx_gdsc = devm_regulator_get(dev, name);
986 if (IS_ERR(gmu->cx_gdsc)) {
987 ret = PTR_ERR(gmu->cx_gdsc);
988 dev_err(dev, "dt: GMU couldn't get CX gdsc\n");
989 gmu->cx_gdsc = NULL;
990 return ret;
991 }
992 } else if (!strcmp(name, "vdd")) {
993 gmu->gx_gdsc = devm_regulator_get(dev, name);
994 if (IS_ERR(gmu->gx_gdsc)) {
995 ret = PTR_ERR(gmu->gx_gdsc);
996 dev_err(dev, "dt: GMU couldn't get GX gdsc\n");
997 gmu->gx_gdsc = NULL;
998 return ret;
999 }
1000 } else {
1001 dev_err(dev, "dt: Unknown GMU regulator: %s\n", name);
1002 return -ENODEV;
1003 }
1004 }
1005
1006 return 0;
1007}
1008
Kyle Piefere7b06b42017-04-06 13:53:01 -07001009static int gmu_irq_probe(struct gmu_device *gmu)
1010{
1011 int ret;
1012 struct kgsl_hfi *hfi = &gmu->hfi;
1013
1014 hfi->hfi_interrupt_num = platform_get_irq_byname(gmu->pdev,
1015 "kgsl_hfi_irq");
1016 ret = devm_request_irq(&gmu->pdev->dev,
1017 hfi->hfi_interrupt_num,
1018 hfi_irq_handler, IRQF_TRIGGER_HIGH,
1019 "HFI", hfi);
1020 if (ret) {
1021 dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
1022 hfi->hfi_interrupt_num, ret);
1023 return ret;
1024 }
1025
1026 gmu->gmu_interrupt_num = platform_get_irq_byname(gmu->pdev,
1027 "kgsl_gmu_irq");
1028 ret = devm_request_irq(&gmu->pdev->dev,
1029 gmu->gmu_interrupt_num,
1030 gmu_irq_handler, IRQF_TRIGGER_HIGH,
1031 "GMU", gmu);
1032 if (ret)
1033 dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
1034 gmu->gmu_interrupt_num, ret);
1035
1036 return ret;
1037}
1038
1039static void gmu_irq_enable(struct kgsl_device *device)
1040{
1041 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1042 struct gmu_device *gmu = &device->gmu;
1043 struct kgsl_hfi *hfi = &gmu->hfi;
1044
1045 /* Clear any pending IRQs before unmasking on GMU */
1046 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
1047 0xFFFFFFFF);
1048 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
1049 0xFFFFFFFF);
1050
1051 /* Unmask needed IRQs on GMU */
1052 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
1053 (unsigned int) ~HFI_IRQ_MASK);
1054 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
1055 (unsigned int) ~GMU_AO_INT_MASK);
1056
1057 /* Enable all IRQs on host */
1058 enable_irq(hfi->hfi_interrupt_num);
1059 enable_irq(gmu->gmu_interrupt_num);
1060}
1061
1062static void gmu_irq_disable(struct kgsl_device *device)
1063{
1064 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1065 struct gmu_device *gmu = &device->gmu;
1066 struct kgsl_hfi *hfi = &gmu->hfi;
1067
1068 /* Disable all IRQs on host */
1069 disable_irq(gmu->gmu_interrupt_num);
1070 disable_irq(hfi->hfi_interrupt_num);
1071
1072 /* Mask all IRQs on GMU */
1073 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
1074 0xFFFFFFFF);
1075 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
1076 0xFFFFFFFF);
1077
1078 /* Clear any pending IRQs before disabling */
1079 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
1080 0xFFFFFFFF);
1081 adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
1082 0xFFFFFFFF);
1083}
1084
Kyle Pieferb1027b02017-02-10 13:58:58 -08001085/* Do not access any GMU registers in GMU probe function */
1086int gmu_probe(struct kgsl_device *device)
1087{
1088 struct device_node *node;
1089 struct gmu_device *gmu = &device->gmu;
1090 struct gmu_memdesc *mem_addr = NULL;
1091 struct kgsl_hfi *hfi = &gmu->hfi;
1092 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
Kyle Pieferd3964162017-04-06 15:44:03 -07001093 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001094 int i = 0, ret = -ENXIO;
1095
1096 node = of_find_compatible_node(device->pdev->dev.of_node,
1097 NULL, "qcom,gpu-gmu");
1098
1099 if (node == NULL)
1100 return ret;
1101
1102 device->gmu.pdev = of_find_device_by_node(node);
1103
1104 /* Set up GMU regulators */
1105 ret = gmu_regulators_probe(gmu, node);
1106 if (ret)
1107 goto error;
1108
1109 /* Set up GMU clocks */
1110 ret = gmu_clocks_probe(gmu, node);
1111 if (ret)
1112 goto error;
1113
1114 /* Set up GMU IOMMU and shared memory with GMU */
1115 ret = gmu_memory_probe(&device->gmu, node);
1116 if (ret)
1117 goto error;
1118 mem_addr = gmu->hfi_mem;
1119
1120 /* Map and reserve GMU CSRs registers */
George Shencc7e1092017-08-30 10:45:52 -07001121 ret = gmu_reg_probe(gmu, "kgsl_gmu_reg", true);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001122 if (ret)
1123 goto error;
1124
George Shencc7e1092017-08-30 10:45:52 -07001125 ret = gmu_reg_probe(gmu, "kgsl_gmu_pdc_reg", false);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001126 if (ret)
1127 goto error;
1128
1129 gmu->gmu2gpu_offset = (gmu->reg_phys - device->reg_phys) >> 2;
1130
Kyle Piefere7b06b42017-04-06 13:53:01 -07001131 /* Initialize HFI and GMU interrupts */
1132 ret = gmu_irq_probe(gmu);
1133 if (ret)
Kyle Pieferb1027b02017-02-10 13:58:58 -08001134 goto error;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001135
1136 /* Don't enable GMU interrupts until GMU started */
Kyle Piefere7b06b42017-04-06 13:53:01 -07001137 /* We cannot use gmu_irq_disable because it writes registers */
Kyle Pieferb1027b02017-02-10 13:58:58 -08001138 disable_irq(gmu->gmu_interrupt_num);
1139 disable_irq(hfi->hfi_interrupt_num);
1140
1141 tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long)gmu);
1142 INIT_LIST_HEAD(&hfi->msglist);
1143 spin_lock_init(&hfi->msglock);
1144
	/* Retrieve GMU/GPU power level configurations */
1146 ret = gmu_pwrlevel_probe(gmu, node);
1147 if (ret)
1148 goto error;
1149
1150 gmu->num_gpupwrlevels = pwr->num_pwrlevels;
Kyle Pieferb801ab92017-07-13 14:54:13 -07001151 gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001152
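	/*
	 * The GMU DCVS table runs in the reverse order of the KGSL power
	 * level table, so copy the GPU frequencies over in reverse.
	 */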
1153 for (i = 0; i < gmu->num_gpupwrlevels; i++) {
1154 int j = gmu->num_gpupwrlevels - 1 - i;
1155
1156 gmu->gpu_freqs[i] = pwr->pwrlevels[j].gpu_freq;
1157 }
1158
1159 /* Initializes GPU b/w levels configuration */
1160 ret = gmu_gpu_bw_probe(gmu);
1161 if (ret)
1162 goto error;
1163
1164 /* Initialize GMU CNOC b/w levels configuration */
1165 ret = gmu_cnoc_bw_probe(gmu);
1166 if (ret)
1167 goto error;
1168
1169 /* Populates RPMh configurations */
1170 ret = gmu_rpmh_init(gmu, pwr);
1171 if (ret)
1172 goto error;
1173
1174 hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);
1175
Kyle Pieferd3964162017-04-06 15:44:03 -07001176 /* Set up GMU idle states */
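	/*
	 * Walk the idle features in priority order and pick the first one
	 * that is enabled; default to GPU_HW_ACTIVE when none are set.
	 */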
1177 if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT))
1178 gmu->idle_level = GPU_HW_MIN_VOLT;
1179 else if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP))
1180 gmu->idle_level = GPU_HW_NAP;
1181 else if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
1182 gmu->idle_level = GPU_HW_IFPC;
1183 else if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
1184 gmu->idle_level = GPU_HW_SPTP_PC;
1185 else
1186 gmu->idle_level = GPU_HW_ACTIVE;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001187
Oleg Pereletc2ab7f72017-06-22 16:45:57 -07001188 /* disable LM during boot time */
1189 clear_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001190 return 0;
1191
1192error:
1193 gmu_remove(device);
1194 return ret;
1195}
1196
1197
1198
1199static int gmu_enable_clks(struct gmu_device *gmu)
1200{
1201 int ret, j = 0;
1202
1203 if (IS_ERR_OR_NULL(gmu->clks[0]))
1204 return -EINVAL;
1205
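	/*
	 * clks[0] carries the GMU core frequency (indexed by gmu_freqs), so
	 * program it to the default level before enabling the whole list.
	 */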
1206 ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
1207 if (ret) {
1208 dev_err(&gmu->pdev->dev, "fail to set default GMU clk freq %d\n",
1209 gmu->gmu_freqs[DEFAULT_GMU_FREQ_IDX]);
1210 return ret;
1211 }
1212
1213 while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
1214 ret = clk_prepare_enable(gmu->clks[j]);
1215 if (ret) {
1216 dev_err(&gmu->pdev->dev,
1217 "fail to enable gpucc clk idx %d\n",
1218 j);
1219 return ret;
1220 }
1221 j++;
1222 }
1223
1224 set_bit(GMU_CLK_ON, &gmu->flags);
1225 return 0;
1226}
1227
1228static int gmu_disable_clks(struct gmu_device *gmu)
1229{
Kyle Pieferde855722017-07-07 12:18:59 -07001230 int j = 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001231
1232 if (IS_ERR_OR_NULL(gmu->clks[0]))
1233 return 0;
1234
Kyle Pieferb1027b02017-02-10 13:58:58 -08001235 while ((j < MAX_GMU_CLKS) && gmu->clks[j]) {
1236 clk_disable_unprepare(gmu->clks[j]);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001237 j++;
1238 }
1239
1240 clear_bit(GMU_CLK_ON, &gmu->flags);
1241 return 0;
1242
1243}
1244
1245static int gmu_enable_gdsc(struct gmu_device *gmu)
1246{
1247 int ret;
1248
1249 if (IS_ERR_OR_NULL(gmu->cx_gdsc))
1250 return 0;
1251
1252 ret = regulator_enable(gmu->cx_gdsc);
1253 if (ret)
1254 dev_err(&gmu->pdev->dev,
1255 "Failed to enable GMU CX gdsc, error %d\n", ret);
1256
1257 return ret;
1258}
1259
George Shenf364b542017-06-20 17:02:43 -07001260#define CX_GDSC_TIMEOUT 500 /* ms */
Kyle Pieferb1027b02017-02-10 13:58:58 -08001261static int gmu_disable_gdsc(struct gmu_device *gmu)
1262{
1263 int ret;
George Shen433b0c72017-06-12 09:44:34 -07001264 unsigned long t;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001265
1266 if (IS_ERR_OR_NULL(gmu->cx_gdsc))
1267 return 0;
1268
1269 ret = regulator_disable(gmu->cx_gdsc);
George Shen433b0c72017-06-12 09:44:34 -07001270 if (ret) {
Kyle Pieferb1027b02017-02-10 13:58:58 -08001271 dev_err(&gmu->pdev->dev,
1272 "Failed to disable GMU CX gdsc, error %d\n", ret);
George Shen433b0c72017-06-12 09:44:34 -07001273 return ret;
1274 }
Kyle Pieferb1027b02017-02-10 13:58:58 -08001275
	/*
	 * After the GX GDSC turns off, the CX GDSC must turn off as well.
	 * Removing our vote alone cannot guarantee that CX is off, so poll
	 * its state with a timeout (CX_GDSC_TIMEOUT) to make sure.
	 */
1282 t = jiffies + msecs_to_jiffies(CX_GDSC_TIMEOUT);
1283 do {
1284 if (!regulator_is_enabled(gmu->cx_gdsc))
1285 return 0;
George Shenf364b542017-06-20 17:02:43 -07001286 cond_resched();
George Shen433b0c72017-06-12 09:44:34 -07001287
1288 } while (!(time_after(jiffies, t)));
1289
	dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout\n");
1291 return -ETIMEDOUT;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001292}
1293
Kyle Piefere923b7a2017-03-28 17:31:48 -07001294static int gmu_fast_boot(struct kgsl_device *device)
1295{
1296 int ret;
1297 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1298 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1299 struct gmu_device *gmu = &device->gmu;
1300
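	/*
	 * Tear down HFI, restart the already loaded firmware image, then
	 * bring HFI back up as a warm boot.
	 */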
1301 hfi_stop(gmu);
1302 clear_bit(GMU_HFI_ON, &gmu->flags);
1303
1304 ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
1305 GMU_RESET, 0);
1306 if (ret)
1307 return ret;
1308
	/* FIXME: enable WD interrupt */
1310
1311 ret = hfi_start(gmu, GMU_WARM_BOOT);
1312 if (ret)
1313 return ret;
1314
1315 ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
1316 OOB_CPINIT_CHECK_MASK, OOB_CPINIT_CLEAR_MASK);
1317
1318 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
1319 gpudev->oob_clear(adreno_dev,
1320 OOB_BOOT_SLUMBER_CLEAR_MASK);
1321
1322 return ret;
1323}
1324
1325static int gmu_suspend(struct kgsl_device *device)
1326{
1327 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1328 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1329 struct gmu_device *gmu = &device->gmu;
1330
1331 if (!test_bit(GMU_CLK_ON, &gmu->flags))
1332 return 0;
1333
1334 /* Pending message in all queues are abandoned */
1335 hfi_stop(gmu);
1336 clear_bit(GMU_HFI_ON, &gmu->flags);
1337 gmu_irq_disable(device);
1338
1339 if (gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_SUSPEND, 0, 0))
1340 return -EINVAL;
1341
1342 gmu_disable_clks(gmu);
1343 gmu_disable_gdsc(gmu);
1344 return 0;
1345}
1346
George Shen6927d8f2017-07-19 11:38:10 -07001347static void gmu_snapshot(struct kgsl_device *device)
1348{
1349 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1350 struct gmu_device *gmu = &device->gmu;
1351
1352 if (!test_and_set_bit(GMU_FAULT, &gmu->flags)) {
1353 /* Mask so there's no interrupt caused by NMI */
1354 adreno_write_gmureg(adreno_dev,
1355 ADRENO_REG_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);
1356
1357 /* Make sure the interrupt is masked before causing it */
1358 wmb();
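		/*
		 * Clearing the NMI status register and then setting bit 9 of
		 * GMU_CM3_CFG raises an NMI on the GMU CM3 core so that its
		 * state is dumped before the host snapshot below is taken.
		 */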
1359 adreno_write_gmureg(adreno_dev,
1360 ADRENO_REG_GMU_NMI_CONTROL_STATUS, 0);
1361 adreno_write_gmureg(adreno_dev,
1362 ADRENO_REG_GMU_CM3_CFG, (1 << 9));
1363
1364 /* Wait for the NMI to be handled */
1365 wmb();
1366 udelay(100);
Carter Cooper4300d0f42017-08-25 14:28:50 -06001367 kgsl_device_snapshot(device, ERR_PTR(-EINVAL));
George Shen6927d8f2017-07-19 11:38:10 -07001368
1369 adreno_write_gmureg(adreno_dev,
1370 ADRENO_REG_GMU_GMU2HOST_INTR_CLR, 0xFFFFFFFF);
1371 adreno_write_gmureg(adreno_dev,
1372 ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
1373 (unsigned int) ~HFI_IRQ_MASK);
1374 }
1375}
1376
Kyle Pieferb1027b02017-02-10 13:58:58 -08001377/* To be called to power on both GPU and GMU */
1378int gmu_start(struct kgsl_device *device)
1379{
Kyle Pieferb801ab92017-07-13 14:54:13 -07001380 int ret = 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001381 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1382 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
1383 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
1384 struct gmu_device *gmu = &device->gmu;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001385
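	/*
	 * INIT and SUSPEND take the cold boot path, SLUMBER resumes the GMU
	 * with a warm boot, and RESET either reloads the firmware after a
	 * hard reset or falls back to a fast warm boot.
	 */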
Kyle Piefere923b7a2017-03-28 17:31:48 -07001386 switch (device->state) {
1387 case KGSL_STATE_INIT:
1388 case KGSL_STATE_SUSPEND:
1389 WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
1390 gmu_enable_gdsc(gmu);
1391 gmu_enable_clks(gmu);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001392
Kyle Pieferb1027b02017-02-10 13:58:58 -08001393 /* Vote for 300MHz DDR for GMU to init */
1394 ret = msm_bus_scale_client_update_request(gmu->pcl,
Kyle Pieferb801ab92017-07-13 14:54:13 -07001395 pwr->pwrlevels[pwr->default_pwrlevel].bus_freq);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001396 if (ret) {
1397 dev_err(&gmu->pdev->dev,
1398 "Failed to allocate gmu b/w\n");
1399 goto error_clks;
1400 }
1401
1402 ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
1403 GMU_COLD_BOOT, 0);
1404 if (ret)
1405 goto error_bus;
1406
Kyle Piefere7b06b42017-04-06 13:53:01 -07001407 gmu_irq_enable(device);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001408
1409 ret = hfi_start(gmu, GMU_COLD_BOOT);
1410 if (ret)
1411 goto error_gpu;
1412
1413 /* Send default DCVS level */
Kyle Pieferb801ab92017-07-13 14:54:13 -07001414 ret = gmu_dcvs_set(gmu, pwr->default_pwrlevel,
1415 pwr->pwrlevels[pwr->default_pwrlevel].bus_freq);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001416 if (ret)
1417 goto error_gpu;
Kyle Piefere923b7a2017-03-28 17:31:48 -07001418
1419 msm_bus_scale_client_update_request(gmu->pcl, 0);
1420 break;
1421
1422 case KGSL_STATE_SLUMBER:
1423 WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
1424 gmu_enable_gdsc(gmu);
1425 gmu_enable_clks(gmu);
1426
Kyle Pieferb1027b02017-02-10 13:58:58 -08001427 ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
1428 GMU_WARM_BOOT, 0);
1429 if (ret)
1430 goto error_clks;
1431
Kyle Piefere7b06b42017-04-06 13:53:01 -07001432 gmu_irq_enable(device);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001433
1434 ret = hfi_start(gmu, GMU_WARM_BOOT);
1435 if (ret)
1436 goto error_gpu;
1437
Kyle Piefer50af7d02017-07-25 11:00:17 -07001438 ret = gmu_dcvs_set(gmu, gmu->wakeup_pwrlevel,
1439 pwr->pwrlevels[gmu->wakeup_pwrlevel].bus_freq);
1440 if (ret)
1441 goto error_gpu;
1442
1443 gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
Kyle Piefere923b7a2017-03-28 17:31:48 -07001444 break;
1445
1446 case KGSL_STATE_RESET:
1447 if (test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv)) {
1448 gmu_suspend(device);
1449 gmu_enable_gdsc(gmu);
1450 gmu_enable_clks(gmu);
1451
Kyle Piefere923b7a2017-03-28 17:31:48 -07001452 ret = gpudev->rpmh_gpu_pwrctrl(
1453 adreno_dev, GMU_FW_START, GMU_RESET, 0);
1454 if (ret)
1455 goto error_clks;
1456
1457 gmu_irq_enable(device);
1458
Kyle Piefer7a714cd2017-06-21 15:55:47 -07001459 ret = hfi_start(gmu, GMU_COLD_BOOT);
Kyle Piefere923b7a2017-03-28 17:31:48 -07001460 if (ret)
1461 goto error_gpu;
1462
1463 /* Send DCVS level prior to reset*/
Kyle Pieferb801ab92017-07-13 14:54:13 -07001464 ret = gmu_dcvs_set(gmu, pwr->active_pwrlevel,
1465 pwr->pwrlevels[pwr->active_pwrlevel]
1466 .bus_freq);
Kyle Piefere923b7a2017-03-28 17:31:48 -07001467 if (ret)
1468 goto error_gpu;
1469
1470 ret = gpudev->oob_set(adreno_dev,
1471 OOB_CPINIT_SET_MASK,
1472 OOB_CPINIT_CHECK_MASK,
1473 OOB_CPINIT_CLEAR_MASK);
1474
Kyle Pieferb801ab92017-07-13 14:54:13 -07001475 } else
Kyle Piefere923b7a2017-03-28 17:31:48 -07001476 gmu_fast_boot(device);
Kyle Piefere923b7a2017-03-28 17:31:48 -07001477 break;
1478 default:
1479 break;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001480 }
1481
Kyle Piefere923b7a2017-03-28 17:31:48 -07001482 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
1483 gpudev->oob_clear(adreno_dev,
1484 OOB_BOOT_SLUMBER_CLEAR_MASK);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001485
Kyle Piefere923b7a2017-03-28 17:31:48 -07001486 return ret;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001487
1488error_gpu:
George Shen6927d8f2017-07-19 11:38:10 -07001489 gmu_snapshot(device);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001490 hfi_stop(gmu);
Kyle Piefere7b06b42017-04-06 13:53:01 -07001491 gmu_irq_disable(device);
George Shen6927d8f2017-07-19 11:38:10 -07001492 if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
1493 gpudev->oob_clear(adreno_dev,
1494 OOB_BOOT_SLUMBER_CLEAR_MASK);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001495 gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
1496error_bus:
George Shen6927d8f2017-07-19 11:38:10 -07001497 msm_bus_scale_client_update_request(gmu->pcl, 0);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001498error_clks:
George Shen6927d8f2017-07-19 11:38:10 -07001499 gmu_snapshot(device);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001500 gmu_disable_clks(gmu);
1501 gmu_disable_gdsc(gmu);
1502 return ret;
1503}
1504
Kyle Piefer5c9478c2017-04-20 15:12:05 -07001505#define GMU_IDLE_TIMEOUT 10 /* ms */
1506
Kyle Pieferb1027b02017-02-10 13:58:58 -08001507/* Caller shall ensure GPU is ready for SLUMBER */
1508void gmu_stop(struct kgsl_device *device)
1509{
1510 struct gmu_device *gmu = &device->gmu;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001511 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1512 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
Kyle Piefer5c9478c2017-04-20 15:12:05 -07001513 unsigned long t;
1514 bool idle = false;
George Shenf2d4e052017-05-11 16:28:23 -07001515 unsigned int reg;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001516
1517 if (!test_bit(GMU_CLK_ON, &gmu->flags))
1518 return;
1519
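	/*
	 * Poll the RPMh power state register until the GMU reports the idle
	 * level we configured, giving up after GMU_IDLE_TIMEOUT.
	 */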
George Shenf2d4e052017-05-11 16:28:23 -07001520 t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
1521 while (!time_after(jiffies, t)) {
1522 adreno_read_gmureg(ADRENO_DEVICE(device),
1523 ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
1524 if (reg == device->gmu.idle_level) {
1525 idle = true;
1526 break;
Kyle Piefer5c9478c2017-04-20 15:12:05 -07001527 }
George Shen56c9cdb2017-08-25 10:43:32 -07001528 /* Wait 100us to reduce unnecessary AHB bus traffic */
1529 udelay(100);
1530 cond_resched();
Oleg Perelet62d5cec2017-03-27 16:14:52 -07001531 }
Kyle Pieferb1027b02017-02-10 13:58:58 -08001532
1533 gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
1534
Kyle Piefer5c9478c2017-04-20 15:12:05 -07001535 if (!idle || (gpudev->wait_for_gmu_idle &&
1536 gpudev->wait_for_gmu_idle(adreno_dev))) {
Kyle Piefercfce3d52017-05-30 17:10:11 -07001537 dev_err(&gmu->pdev->dev, "Stopping GMU before it is idle\n");
Kyle Piefer5c9478c2017-04-20 15:12:05 -07001538 }
1539
Kyle Pieferb1027b02017-02-10 13:58:58 -08001540 /* Pending message in all queues are abandoned */
1541 hfi_stop(gmu);
1542 clear_bit(GMU_HFI_ON, &gmu->flags);
Kyle Piefere7b06b42017-04-06 13:53:01 -07001543 gmu_irq_disable(device);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001544
1545 gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
1546 gmu_disable_clks(gmu);
1547 gmu_disable_gdsc(gmu);
1548
1549 /* TODO: Vote CX, MX retention off */
1550
1551 msm_bus_scale_client_update_request(gmu->pcl, 0);
1552}
1553
1554void gmu_remove(struct kgsl_device *device)
1555{
1556 struct gmu_device *gmu = &device->gmu;
1557 struct kgsl_hfi *hfi = &gmu->hfi;
Kyle Piefer8570d512017-04-21 14:50:51 -07001558 int i = 0;
Kyle Pieferb1027b02017-02-10 13:58:58 -08001559
1560 if (!device->gmu.pdev)
1561 return;
1562
1563 tasklet_kill(&hfi->tasklet);
1564
1565 gmu_stop(device);
Kyle Piefere7b06b42017-04-06 13:53:01 -07001566 gmu_irq_disable(device);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001567
Kyle Piefer8570d512017-04-21 14:50:51 -07001568 while ((i < MAX_GMU_CLKS) && gmu->clks[i]) {
1569 gmu->clks[i] = NULL;
1570 i++;
1571 }
1572
Kyle Pieferf7febd62017-03-20 16:49:49 -07001573 if (gmu->gmu_interrupt_num) {
Kyle Pieferf7febd62017-03-20 16:49:49 -07001574 devm_free_irq(&gmu->pdev->dev,
1575 gmu->gmu_interrupt_num, gmu);
1576 gmu->gmu_interrupt_num = 0;
1577 }
1578
1579 if (hfi->hfi_interrupt_num) {
Kyle Pieferf7febd62017-03-20 16:49:49 -07001580 devm_free_irq(&gmu->pdev->dev,
Kyle Piefercec5e212017-05-19 13:15:15 -07001581 hfi->hfi_interrupt_num, hfi);
Kyle Pieferf7febd62017-03-20 16:49:49 -07001582 hfi->hfi_interrupt_num = 0;
1583 }
Kyle Pieferb1027b02017-02-10 13:58:58 -08001584
1585 if (gmu->ccl) {
1586 msm_bus_scale_unregister_client(gmu->ccl);
1587 gmu->ccl = 0;
1588 }
1589
1590 if (gmu->pcl) {
1591 msm_bus_scale_unregister_client(gmu->pcl);
1592 gmu->pcl = 0;
1593 }
1594
1595 if (gmu->pdc_reg_virt) {
1596 devm_iounmap(&gmu->pdev->dev, gmu->pdc_reg_virt);
1597 gmu->pdc_reg_virt = NULL;
1598 }
1599
1600 if (gmu->reg_virt) {
1601 devm_iounmap(&gmu->pdev->dev, gmu->reg_virt);
Kyle Pieferb1027b02017-02-10 13:58:58 -08001602 gmu->reg_virt = NULL;
1603 }
1604
1605 if (gmu->hfi_mem || gmu->dump_mem)
1606 gmu_memory_close(&device->gmu);
1607
1608 for (i = 0; i < MAX_GMU_CLKS; i++) {
1609 if (gmu->clks[i]) {
1610 devm_clk_put(&gmu->pdev->dev, gmu->clks[i]);
1611 gmu->clks[i] = NULL;
1612 }
1613 }
1614
1615 if (gmu->gx_gdsc) {
1616 devm_regulator_put(gmu->gx_gdsc);
1617 gmu->gx_gdsc = NULL;
1618 }
1619
1620 if (gmu->cx_gdsc) {
1621 devm_regulator_put(gmu->cx_gdsc);
1622 gmu->cx_gdsc = NULL;
1623 }
1624
1625 device->gmu.pdev = NULL;
1626}