/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 16
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_CHIP_ID:
		*value = adreno_gpu->rev.patchid |
				(adreno_gpu->rev.minor << 8) |
				(adreno_gpu->rev.major << 16) |
				(adreno_gpu->rev.core << 24);
		return 0;
	case MSM_PARAM_MAX_FREQ:
		*value = adreno_gpu->base.fast_rate;
		return 0;
	case MSM_PARAM_TIMESTAMP:
		if (adreno_gpu->funcs->get_timestamp)
			return adreno_gpu->funcs->get_timestamp(gpu, value);
		return -EINVAL;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}
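
/*
 * Userspace reaches the params above through the MSM get_param ioctl.  A
 * minimal sketch of the round trip (illustrative only; assumes the
 * drm_msm_param layout from the msm uapi header, error handling omitted):
 *
 *	struct drm_msm_param req = {
 *		.pipe  = MSM_PIPE_3D0,
 *		.param = MSM_PARAM_CHIP_ID,
 *	};
 *	ioctl(fd, DRM_IOCTL_MSM_GET_PARAM, &req);
 *
 * For MSM_PARAM_CHIP_ID the returned value packs the revision as
 * (core << 24) | (major << 16) | (minor << 8) | patchid, so e.g.
 * core=3, major=0, minor=6, patchid=0 reads back as 0x03000600.
 */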

/* The GPU writes status back (ringbuffer read pointer, completed fence
 * seqno) into the small shared adreno_rbmemptrs buffer; rbmemptr() gives
 * the GPU iova of one of its fields:
 */
#define rbmemptr(adreno_gpu, member) \
	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))

int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	DBG("%s", gpu->name);

	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
	if (ret) {
		gpu->rb_iova = 0;
		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
		return ret;
	}

	/* Setup REG_CP_RB_CNTL: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
			/* size is log2(quad-words), e.g. the 32KB ring is
			 * 4096 qwords, so BUFSZ = ilog2(4096) = 12:
			 */
			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) |
			(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));

	/* Setup ringbuffer address: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);

	if (!adreno_is_a430(adreno_gpu))
		adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
			rbmemptr(adreno_gpu, rptr));

	return 0;
}

static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	/* cur and start are uint32_t pointers, so this is in dwords: */
	return ring->cur - ring->start;
}

/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu)
{
	if (adreno_is_a430(adreno_gpu))
		return adreno_gpu->memptrs->rptr = adreno_gpu_read(
			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
	else
		return adreno_gpu->memptrs->rptr;
}

uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	return adreno_gpu->memptrs->fence;
}

void adreno_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct drm_device *dev = gpu->dev;
	int ret;

	gpu->funcs->pm_suspend(gpu);

	/* reset ringbuffer: */
	gpu->rb->cur = gpu->rb->start;

	/* reset completed fence seqno, just discard anything pending: */
	adreno_gpu->memptrs->fence = gpu->submitted_fence;
	adreno_gpu->memptrs->rptr = 0;
	adreno_gpu->memptrs->wptr = 0;

	gpu->funcs->pm_resume(gpu);
	ret = gpu->funcs->hw_init(gpu);
	if (ret) {
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
		/* hmm, oh well? */
	}
}

int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned i, ibs = 0;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
			/* fall-thru */
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, submit->cmd[i].iova);
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}
	}

	/* on a320, at least, we seem to need to pad things out to an
	 * even number of qwords to avoid issue w/ CP hanging on wrap-
	 * around:
	 */
	if (ibs % 2)
		OUT_PKT2(ring);

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->fence);

	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}

	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
	OUT_RING(ring, submit->fence);

	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);

	/* Workaround for missing irq issue on 8x16/a306.  Unsure if the
	 * root cause is a platform issue or some a306 quirk, but this
	 * keeps things humming along:
	 */
	if (adreno_is_a306(adreno_gpu)) {
		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
		OUT_PKT3(ring, CP_INTERRUPT, 1);
		OUT_RING(ring, 0x80000000);
	}

#if 0
	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}
#endif

	gpu->funcs->flush(gpu);

	return 0;
}

void adreno_flush(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(gpu->rb);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}

void adreno_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(gpu->rb);
	int ret;

	/* wait for CP to drain ringbuffer: */
	ret = spin_until(get_rptr(adreno_gpu) == wptr);

	if (ret)
		DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);

	/* TODO maybe we need to reset GPU here to recover from hang? */
}

#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->submitted_fence);
	seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
	seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
	seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));

	gpu->funcs->pm_resume(gpu);

	/* dump these out in a form that can be parsed by demsm: */
	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
		}
	}

	gpu->funcs->pm_suspend(gpu);
}
#endif

/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful.  The scratch registers seem always
 * safe to read when GPU has hung (unlike some other regs, depending
 * on how the GPU hung), and they are useful to match up to cmdstream
 * dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	printk("revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->submitted_fence);
	printk("rptr: %d\n", get_rptr(adreno_gpu));
	printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
	printk("rb wptr: %d\n", get_wptr(gpu->rb));

	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
	}
}

/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	/* dump these out in a form that can be parsed by demsm: */
	printk("IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			printk("IO:R %08x %08x\n", addr<<2, val);
		}
	}
}

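/*
 * ring_freewords() below treats the ring as a circular buffer of dwords
 * and reports at most size - 1 free dwords: with wptr == rptr (empty
 * ring) it returns size - 1, so one dword is effectively held in reserve
 * and a completely full ring is never mistaken for an empty one.  E.g.
 * for the 32KB ring (8192 dwords) with rptr=90 and wptr=100:
 * (90 + 8191 - 100) % 8192 = 8181 free dwords.
 */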
static uint32_t ring_freewords(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t size = gpu->rb->size / 4;
	uint32_t wptr = get_wptr(gpu->rb);
	uint32_t rptr = get_rptr(adreno_gpu);
	return (rptr + (size - 1) - wptr) % size;
}

void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
	if (spin_until(ring_freewords(gpu) >= ndwords))
		DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
}

static const char *iommu_ports[] = {
		"gfx3d_user", "gfx3d_priv",
		"gfx3d1_user", "gfx3d1_priv",
};

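/*
 * A chip-specific backend (a3xx/a4xx) hooks in by filling out an
 * adreno_gpu_funcs table and calling adreno_gpu_init() from its own init
 * path.  A rough sketch, loosely based on the a3xx backend (abbreviated,
 * illustrative only):
 *
 *	static const struct adreno_gpu_funcs funcs = {
 *		.base = {
 *			.hw_init = a3xx_hw_init,
 *			.submit  = adreno_submit,
 *			.flush   = adreno_flush,
 *			...
 *		},
 *	};
 *
 *	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
 */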
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct msm_mmu *mmu;
	int ret;

	adreno_gpu->funcs = funcs;
	adreno_gpu->info = adreno_info(config->rev);
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;

	gpu->fast_rate = config->fast_rate;
	gpu->slow_rate = config->slow_rate;
	gpu->bus_freq = config->bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
	gpu->bus_scale_table = config->bus_scale_table;
#endif

	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
			gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);

	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
			adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
			RB_SIZE);
	if (ret)
		return ret;

	ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
				adreno_gpu->info->pm4fw, ret);
		return ret;
	}

	ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
				adreno_gpu->info->pfpfw, ret);
		return ret;
	}

	mmu = gpu->mmu;
	if (mmu) {
		ret = mmu->funcs->attach(mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret)
			return ret;
	}

	mutex_lock(&drm->struct_mutex);
	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
			MSM_BO_UNCACHED);
	mutex_unlock(&drm->struct_mutex);
	if (IS_ERR(adreno_gpu->memptrs_bo)) {
		ret = PTR_ERR(adreno_gpu->memptrs_bo);
		adreno_gpu->memptrs_bo = NULL;
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		return ret;
	}

	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
	if (!adreno_gpu->memptrs) {
		dev_err(drm->dev, "could not vmap memptrs\n");
		return -ENOMEM;
	}

	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
			&adreno_gpu->memptrs_iova);
	if (ret) {
		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
		return ret;
	}

	return 0;
}

void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
	if (gpu->memptrs_bo) {
		if (gpu->memptrs_iova)
			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
	}
	release_firmware(gpu->pm4);
	release_firmware(gpu->pfp);
	msm_gpu_cleanup(&gpu->base);
}