Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2013 Red Hat |
| 3 | * Author: Rob Clark <robdclark@gmail.com> |
| 4 | * |
Aravind Ganesan | 91b74e9 | 2014-09-08 10:57:28 -0600 | [diff] [blame] | 5 | * Copyright (c) 2014 The Linux Foundation. All rights reserved. |
| 6 | * |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 7 | * This program is free software; you can redistribute it and/or modify it |
| 8 | * under the terms of the GNU General Public License version 2 as published by |
| 9 | * the Free Software Foundation. |
| 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, but WITHOUT |
| 12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 14 | * more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU General Public License along with |
| 17 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | */ |
| 19 | |
| 20 | #include "adreno_gpu.h" |
| 21 | #include "msm_gem.h" |
Rob Clark | 871d812 | 2013-11-16 12:56:06 -0500 | [diff] [blame] | 22 | #include "msm_mmu.h" |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 23 | |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 24 | #define RB_SIZE SZ_32K |
| 25 | #define RB_BLKSIZE 16 |
| 26 | |
| 27 | int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) |
| 28 | { |
| 29 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 30 | |
| 31 | switch (param) { |
| 32 | case MSM_PARAM_GPU_ID: |
| 33 | *value = adreno_gpu->info->revn; |
| 34 | return 0; |
| 35 | case MSM_PARAM_GMEM_SIZE: |
Rob Clark | 5545996 | 2013-12-05 17:39:53 -0500 | [diff] [blame] | 36 | *value = adreno_gpu->gmem; |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 37 | return 0; |
Rob Clark | 4e1cbaa | 2014-02-04 14:16:04 -0500 | [diff] [blame] | 38 | case MSM_PARAM_CHIP_ID: |
| 39 | *value = adreno_gpu->rev.patchid | |
| 40 | (adreno_gpu->rev.minor << 8) | |
| 41 | (adreno_gpu->rev.major << 16) | |
| 42 | (adreno_gpu->rev.core << 24); |
| 43 | return 0; |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 44 | default: |
| 45 | DBG("%s: invalid param: %u", gpu->name, param); |
| 46 | return -EINVAL; |
| 47 | } |
| 48 | } |
| 49 | |
/* GPU (iova) address of a given member of the adreno_rbmemptrs struct: */
#define rbmemptr(adreno_gpu, member)  \
	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
| 52 | |
/*
 * Program the common CP/ringbuffer registers.  Called on first init and
 * again from adreno_recover(), so it must be safe to run repeatedly.
 * Returns 0 on success, or a negative errno if the ringbuffer BO could
 * not be mapped into the GPU address space.
 */
int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	DBG("%s", gpu->name);

	/* get (or re-establish) the GPU-visible address of the ringbuffer: */
	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
	if (ret) {
		gpu->rb_iova = 0;
		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
		return ret;
	}

	/* Setup REG_CP_RB_CNTL: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
			/* size is log2(quad-words): */
			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));

	/* Setup ringbuffer address: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
	/* tell the CP where to write back its read pointer: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
			rbmemptr(adreno_gpu, rptr));

	/* Setup scratch/timestamp: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
			rbmemptr(adreno_gpu, fence));

	adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);

	return 0;
}
| 86 | |
| 87 | static uint32_t get_wptr(struct msm_ringbuffer *ring) |
| 88 | { |
| 89 | return ring->cur - ring->start; |
| 90 | } |
| 91 | |
| 92 | uint32_t adreno_last_fence(struct msm_gpu *gpu) |
| 93 | { |
| 94 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 95 | return adreno_gpu->memptrs->fence; |
| 96 | } |
| 97 | |
Rob Clark | bd6f82d | 2013-08-24 14:20:38 -0400 | [diff] [blame] | 98 | void adreno_recover(struct msm_gpu *gpu) |
| 99 | { |
| 100 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 101 | struct drm_device *dev = gpu->dev; |
| 102 | int ret; |
| 103 | |
| 104 | gpu->funcs->pm_suspend(gpu); |
| 105 | |
| 106 | /* reset ringbuffer: */ |
| 107 | gpu->rb->cur = gpu->rb->start; |
| 108 | |
| 109 | /* reset completed fence seqno, just discard anything pending: */ |
| 110 | adreno_gpu->memptrs->fence = gpu->submitted_fence; |
Rob Clark | 26791c4 | 2013-09-03 07:12:03 -0400 | [diff] [blame] | 111 | adreno_gpu->memptrs->rptr = 0; |
| 112 | adreno_gpu->memptrs->wptr = 0; |
Rob Clark | bd6f82d | 2013-08-24 14:20:38 -0400 | [diff] [blame] | 113 | |
| 114 | gpu->funcs->pm_resume(gpu); |
| 115 | ret = gpu->funcs->hw_init(gpu); |
| 116 | if (ret) { |
| 117 | dev_err(dev->dev, "gpu hw init failed: %d\n", ret); |
| 118 | /* hmm, oh well? */ |
| 119 | } |
| 120 | } |
| 121 | |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 122 | int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, |
| 123 | struct msm_file_private *ctx) |
| 124 | { |
| 125 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 126 | struct msm_drm_private *priv = gpu->dev->dev_private; |
| 127 | struct msm_ringbuffer *ring = gpu->rb; |
| 128 | unsigned i, ibs = 0; |
| 129 | |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 130 | for (i = 0; i < submit->nr_cmds; i++) { |
| 131 | switch (submit->cmd[i].type) { |
| 132 | case MSM_SUBMIT_CMD_IB_TARGET_BUF: |
| 133 | /* ignore IB-targets */ |
| 134 | break; |
| 135 | case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: |
| 136 | /* ignore if there has not been a ctx switch: */ |
| 137 | if (priv->lastctx == ctx) |
| 138 | break; |
| 139 | case MSM_SUBMIT_CMD_BUF: |
| 140 | OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2); |
| 141 | OUT_RING(ring, submit->cmd[i].iova); |
| 142 | OUT_RING(ring, submit->cmd[i].size); |
| 143 | ibs++; |
| 144 | break; |
| 145 | } |
| 146 | } |
| 147 | |
| 148 | /* on a320, at least, we seem to need to pad things out to an |
| 149 | * even number of qwords to avoid issue w/ CP hanging on wrap- |
| 150 | * around: |
| 151 | */ |
| 152 | if (ibs % 2) |
| 153 | OUT_PKT2(ring); |
| 154 | |
| 155 | OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); |
| 156 | OUT_RING(ring, submit->fence); |
| 157 | |
Aravind Ganesan | 23bd62f | 2014-09-08 13:40:16 -0600 | [diff] [blame] | 158 | if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) { |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 159 | /* Flush HLSQ lazy updates to make sure there is nothing |
| 160 | * pending for indirect loads after the timestamp has |
| 161 | * passed: |
| 162 | */ |
| 163 | OUT_PKT3(ring, CP_EVENT_WRITE, 1); |
| 164 | OUT_RING(ring, HLSQ_FLUSH); |
| 165 | |
| 166 | OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); |
| 167 | OUT_RING(ring, 0x00000000); |
| 168 | } |
| 169 | |
| 170 | OUT_PKT3(ring, CP_EVENT_WRITE, 3); |
| 171 | OUT_RING(ring, CACHE_FLUSH_TS); |
| 172 | OUT_RING(ring, rbmemptr(adreno_gpu, fence)); |
| 173 | OUT_RING(ring, submit->fence); |
| 174 | |
| 175 | /* we could maybe be clever and only CP_COND_EXEC the interrupt: */ |
| 176 | OUT_PKT3(ring, CP_INTERRUPT, 1); |
| 177 | OUT_RING(ring, 0x80000000); |
| 178 | |
| 179 | #if 0 |
| 180 | if (adreno_is_a3xx(adreno_gpu)) { |
| 181 | /* Dummy set-constant to trigger context rollover */ |
| 182 | OUT_PKT3(ring, CP_SET_CONSTANT, 2); |
| 183 | OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG)); |
| 184 | OUT_RING(ring, 0x00000000); |
| 185 | } |
| 186 | #endif |
| 187 | |
| 188 | gpu->funcs->flush(gpu); |
| 189 | |
| 190 | return 0; |
| 191 | } |
| 192 | |
| 193 | void adreno_flush(struct msm_gpu *gpu) |
| 194 | { |
Aravind Ganesan | 91b74e9 | 2014-09-08 10:57:28 -0600 | [diff] [blame] | 195 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 196 | uint32_t wptr = get_wptr(gpu->rb); |
| 197 | |
| 198 | /* ensure writes to ringbuffer have hit system memory: */ |
| 199 | mb(); |
| 200 | |
Aravind Ganesan | 91b74e9 | 2014-09-08 10:57:28 -0600 | [diff] [blame] | 201 | adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 202 | } |
| 203 | |
| 204 | void adreno_idle(struct msm_gpu *gpu) |
| 205 | { |
| 206 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
Rob Clark | 0963756 | 2014-01-11 16:11:59 -0500 | [diff] [blame] | 207 | uint32_t wptr = get_wptr(gpu->rb); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 208 | |
Rob Clark | 0963756 | 2014-01-11 16:11:59 -0500 | [diff] [blame] | 209 | /* wait for CP to drain ringbuffer: */ |
| 210 | if (spin_until(adreno_gpu->memptrs->rptr == wptr)) |
| 211 | DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 212 | |
| 213 | /* TODO maybe we need to reset GPU here to recover from hang? */ |
| 214 | } |
| 215 | |
| 216 | #ifdef CONFIG_DEBUG_FS |
| 217 | void adreno_show(struct msm_gpu *gpu, struct seq_file *m) |
| 218 | { |
| 219 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
Rob Clark | 3bcefb0 | 2014-09-05 15:05:38 -0400 | [diff] [blame] | 220 | int i; |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 221 | |
| 222 | seq_printf(m, "revision: %d (%d.%d.%d.%d)\n", |
| 223 | adreno_gpu->info->revn, adreno_gpu->rev.core, |
| 224 | adreno_gpu->rev.major, adreno_gpu->rev.minor, |
| 225 | adreno_gpu->rev.patchid); |
| 226 | |
| 227 | seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence, |
Rob Clark | bd6f82d | 2013-08-24 14:20:38 -0400 | [diff] [blame] | 228 | gpu->submitted_fence); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 229 | seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr); |
| 230 | seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr); |
| 231 | seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); |
Rob Clark | 3bcefb0 | 2014-09-05 15:05:38 -0400 | [diff] [blame] | 232 | |
| 233 | gpu->funcs->pm_resume(gpu); |
| 234 | |
| 235 | /* dump these out in a form that can be parsed by demsm: */ |
| 236 | seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name); |
| 237 | for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { |
| 238 | uint32_t start = adreno_gpu->registers[i]; |
| 239 | uint32_t end = adreno_gpu->registers[i+1]; |
| 240 | uint32_t addr; |
| 241 | |
| 242 | for (addr = start; addr <= end; addr++) { |
| 243 | uint32_t val = gpu_read(gpu, addr); |
| 244 | seq_printf(m, "IO:R %08x %08x\n", addr<<2, val); |
| 245 | } |
| 246 | } |
| 247 | |
| 248 | gpu->funcs->pm_suspend(gpu); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 249 | } |
| 250 | #endif |
| 251 | |
Rob Clark | 2671618 | 2015-04-19 10:14:09 -0400 | [diff] [blame] | 252 | /* Dump common gpu status and scratch registers on any hang, to make |
| 253 | * the hangcheck logs more useful. The scratch registers seem always |
| 254 | * safe to read when GPU has hung (unlike some other regs, depending |
| 255 | * on how the GPU hung), and they are useful to match up to cmdstream |
| 256 | * dumps when debugging hangs: |
| 257 | */ |
| 258 | void adreno_dump_info(struct msm_gpu *gpu) |
Rob Clark | 5b6ef08 | 2013-12-22 10:29:43 -0500 | [diff] [blame] | 259 | { |
| 260 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
Rob Clark | 3bcefb0 | 2014-09-05 15:05:38 -0400 | [diff] [blame] | 261 | int i; |
Rob Clark | 5b6ef08 | 2013-12-22 10:29:43 -0500 | [diff] [blame] | 262 | |
| 263 | printk("revision: %d (%d.%d.%d.%d)\n", |
| 264 | adreno_gpu->info->revn, adreno_gpu->rev.core, |
| 265 | adreno_gpu->rev.major, adreno_gpu->rev.minor, |
| 266 | adreno_gpu->rev.patchid); |
| 267 | |
| 268 | printk("fence: %d/%d\n", adreno_gpu->memptrs->fence, |
| 269 | gpu->submitted_fence); |
| 270 | printk("rptr: %d\n", adreno_gpu->memptrs->rptr); |
| 271 | printk("wptr: %d\n", adreno_gpu->memptrs->wptr); |
| 272 | printk("rb wptr: %d\n", get_wptr(gpu->rb)); |
| 273 | |
Rob Clark | 2671618 | 2015-04-19 10:14:09 -0400 | [diff] [blame] | 274 | for (i = 0; i < 8; i++) { |
| 275 | printk("CP_SCRATCH_REG%d: %u\n", i, |
| 276 | gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i)); |
| 277 | } |
| 278 | } |
| 279 | |
| 280 | /* would be nice to not have to duplicate the _show() stuff with printk(): */ |
| 281 | void adreno_dump(struct msm_gpu *gpu) |
| 282 | { |
| 283 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
| 284 | int i; |
| 285 | |
Rob Clark | 3bcefb0 | 2014-09-05 15:05:38 -0400 | [diff] [blame] | 286 | /* dump these out in a form that can be parsed by demsm: */ |
| 287 | printk("IO:region %s 00000000 00020000\n", gpu->name); |
| 288 | for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { |
| 289 | uint32_t start = adreno_gpu->registers[i]; |
| 290 | uint32_t end = adreno_gpu->registers[i+1]; |
| 291 | uint32_t addr; |
| 292 | |
| 293 | for (addr = start; addr <= end; addr++) { |
| 294 | uint32_t val = gpu_read(gpu, addr); |
| 295 | printk("IO:R %08x %08x\n", addr<<2, val); |
| 296 | } |
| 297 | } |
Rob Clark | 5b6ef08 | 2013-12-22 10:29:43 -0500 | [diff] [blame] | 298 | } |
| 299 | |
Rob Clark | 0963756 | 2014-01-11 16:11:59 -0500 | [diff] [blame] | 300 | static uint32_t ring_freewords(struct msm_gpu *gpu) |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 301 | { |
| 302 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); |
Rob Clark | 0963756 | 2014-01-11 16:11:59 -0500 | [diff] [blame] | 303 | uint32_t size = gpu->rb->size / 4; |
| 304 | uint32_t wptr = get_wptr(gpu->rb); |
| 305 | uint32_t rptr = adreno_gpu->memptrs->rptr; |
| 306 | return (rptr + (size - 1) - wptr) % size; |
| 307 | } |
Rob Clark | 26791c4 | 2013-09-03 07:12:03 -0400 | [diff] [blame] | 308 | |
Rob Clark | 0963756 | 2014-01-11 16:11:59 -0500 | [diff] [blame] | 309 | void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords) |
| 310 | { |
| 311 | if (spin_until(ring_freewords(gpu) >= ndwords)) |
| 312 | DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 313 | } |
| 314 | |
/* IOMMU port names the GPU MMU gets attached to (see adreno_gpu_init): */
static const char *iommu_ports[] = {
		"gfx3d_user", "gfx3d_priv",
		"gfx3d1_user", "gfx3d1_priv",
};
| 319 | |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 320 | int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 321 | struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs) |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 322 | { |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 323 | struct adreno_platform_config *config = pdev->dev.platform_data; |
| 324 | struct msm_gpu *gpu = &adreno_gpu->base; |
Rob Clark | 871d812 | 2013-11-16 12:56:06 -0500 | [diff] [blame] | 325 | struct msm_mmu *mmu; |
Rob Clark | e2550b7 | 2014-09-05 13:30:27 -0400 | [diff] [blame] | 326 | int ret; |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 327 | |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 328 | adreno_gpu->funcs = funcs; |
| 329 | adreno_gpu->info = adreno_info(config->rev); |
| 330 | adreno_gpu->gmem = adreno_gpu->info->gmem; |
| 331 | adreno_gpu->revn = adreno_gpu->info->revn; |
| 332 | adreno_gpu->rev = config->rev; |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 333 | |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 334 | gpu->fast_rate = config->fast_rate; |
| 335 | gpu->slow_rate = config->slow_rate; |
| 336 | gpu->bus_freq = config->bus_freq; |
Rob Clark | 6490ad4 | 2015-06-04 10:26:37 -0400 | [diff] [blame^] | 337 | #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 338 | gpu->bus_scale_table = config->bus_scale_table; |
| 339 | #endif |
| 340 | |
| 341 | DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u", |
| 342 | gpu->fast_rate, gpu->slow_rate, gpu->bus_freq); |
| 343 | |
Rob Clark | 0122f96 | 2014-10-31 11:50:55 -0400 | [diff] [blame] | 344 | ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base, |
| 345 | adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq", |
| 346 | RB_SIZE); |
| 347 | if (ret) |
| 348 | return ret; |
| 349 | |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 350 | ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 351 | if (ret) { |
| 352 | dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n", |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 353 | adreno_gpu->info->pm4fw, ret); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 354 | return ret; |
| 355 | } |
| 356 | |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 357 | ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 358 | if (ret) { |
| 359 | dev_err(drm->dev, "failed to load %s PFP firmware: %d\n", |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 360 | adreno_gpu->info->pfpfw, ret); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 361 | return ret; |
| 362 | } |
| 363 | |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 364 | mmu = gpu->mmu; |
Rob Clark | 871d812 | 2013-11-16 12:56:06 -0500 | [diff] [blame] | 365 | if (mmu) { |
| 366 | ret = mmu->funcs->attach(mmu, iommu_ports, |
| 367 | ARRAY_SIZE(iommu_ports)); |
| 368 | if (ret) |
| 369 | return ret; |
| 370 | } |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 371 | |
Rob Clark | a1ad352 | 2014-07-11 11:59:22 -0400 | [diff] [blame] | 372 | mutex_lock(&drm->struct_mutex); |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 373 | adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs), |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 374 | MSM_BO_UNCACHED); |
Rob Clark | a1ad352 | 2014-07-11 11:59:22 -0400 | [diff] [blame] | 375 | mutex_unlock(&drm->struct_mutex); |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 376 | if (IS_ERR(adreno_gpu->memptrs_bo)) { |
| 377 | ret = PTR_ERR(adreno_gpu->memptrs_bo); |
| 378 | adreno_gpu->memptrs_bo = NULL; |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 379 | dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); |
| 380 | return ret; |
| 381 | } |
| 382 | |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 383 | adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); |
| 384 | if (!adreno_gpu->memptrs) { |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 385 | dev_err(drm->dev, "could not vmap memptrs\n"); |
| 386 | return -ENOMEM; |
| 387 | } |
| 388 | |
Rob Clark | 3526e9f | 2014-09-05 15:03:40 -0400 | [diff] [blame] | 389 | ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id, |
| 390 | &adreno_gpu->memptrs_iova); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 391 | if (ret) { |
| 392 | dev_err(drm->dev, "could not map memptrs: %d\n", ret); |
| 393 | return ret; |
| 394 | } |
| 395 | |
| 396 | return 0; |
| 397 | } |
| 398 | |
| 399 | void adreno_gpu_cleanup(struct adreno_gpu *gpu) |
| 400 | { |
| 401 | if (gpu->memptrs_bo) { |
| 402 | if (gpu->memptrs_iova) |
| 403 | msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); |
Rob Clark | 774449e | 2015-05-15 09:19:36 -0400 | [diff] [blame] | 404 | drm_gem_object_unreference_unlocked(gpu->memptrs_bo); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 405 | } |
Markus Elfring | 5acb07e | 2014-11-25 13:44:20 +0100 | [diff] [blame] | 406 | release_firmware(gpu->pm4); |
| 407 | release_firmware(gpu->pfp); |
Rob Clark | 7198e6b | 2013-07-19 12:59:32 -0400 | [diff] [blame] | 408 | msm_gpu_cleanup(&gpu->base); |
| 409 | } |