/* Copyright (c) 2017-2018,2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "adreno.h"
#include "adreno_a6xx.h"
#include "a6xx_reg.h"
#include "adreno_trace.h"
#include "adreno_pm4types.h"

#define PREEMPT_RECORD(_field) \
		offsetof(struct a6xx_cp_preemption_record, _field)

#define PREEMPT_SMMU_RECORD(_field) \
		offsetof(struct a6xx_cp_smmu_info, _field)

enum {
	SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO = 0,
	SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_NON_SECURE_SAVE_ADDR,
	SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR,
	SET_PSEUDO_REGISTER_SAVE_REGISTER_NON_PRIV_SAVE_ADDR,
	SET_PSEUDO_REGISTER_SAVE_REGISTER_COUNTER,
};

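/*
 * _update_wptr() - Write the software copy of the ringbuffer wptr to the
 * hardware. When called from process context an OOB request is made first so
 * the GPU is guaranteed to be awake for the register access.
 */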
static void _update_wptr(struct adreno_device *adreno_dev, bool reset_timer)
{
	struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
	unsigned int wptr;
	unsigned long flags;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	/*
	 * Make sure the GPU is up before we read the WPTR, since the fence
	 * does not wake the GPU on a read operation.
	 */
	if (in_interrupt() == 0) {
		int status;

		if (gpudev->oob_set) {
			status = gpudev->oob_set(adreno_dev,
				OOB_PREEMPTION_SET_MASK,
				OOB_PREEMPTION_CHECK_MASK,
				OOB_PREEMPTION_CLEAR_MASK);
			if (status) {
				adreno_set_gpu_fault(adreno_dev,
					ADRENO_GMU_FAULT);
				adreno_dispatcher_schedule(
					KGSL_DEVICE(adreno_dev));
				return;
			}
		}
	}

	spin_lock_irqsave(&rb->preempt_lock, flags);

	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);

	if (wptr != rb->wptr) {
		adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
			rb->wptr);
		/*
		 * In case something got submitted while preemption was
		 * ongoing, reset the timer.
		 */
		reset_timer = true;
	}

	if (reset_timer)
		rb->dispatch_q.expires = jiffies +
			msecs_to_jiffies(adreno_drawobj_timeout);

	spin_unlock_irqrestore(&rb->preempt_lock, flags);

	if (in_interrupt() == 0) {
		if (gpudev->oob_clear)
			gpudev->oob_clear(adreno_dev,
				OOB_PREEMPTION_CLEAR_MASK);
	}
}

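/* Atomically move the preemption state machine from 'old' to 'new' */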
static inline bool adreno_move_preempt_state(struct adreno_device *adreno_dev,
	enum adreno_preempt_states old, enum adreno_preempt_states new)
{
	return (atomic_cmpxchg(&adreno_dev->preempt.state, old, new) == old);
}

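/*
 * _a6xx_preemption_done() - Verify that the last triggered preemption has
 * completed, then retire the old ringbuffer, switch to the new one and clear
 * the preemption state.
 */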
static void _a6xx_preemption_done(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status;

	/*
	 * In the very unlikely case that the power is off, do nothing - the
	 * state will be reset on power up and everybody will be happy
	 */

	if (!kgsl_state_is_awake(device))
		return;

	adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);

	if (status & 0x1) {
		KGSL_DRV_ERR(device,
			"Preemption not complete: status=%X cur=%d R/W=%X/%X next=%d R/W=%X/%X\n",
			status, adreno_dev->cur_rb->id,
			adreno_get_rptr(adreno_dev->cur_rb),
			adreno_dev->cur_rb->wptr, adreno_dev->next_rb->id,
			adreno_get_rptr(adreno_dev->next_rb),
			adreno_dev->next_rb->wptr);

		/* Set a fault and restart */
		adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
		adreno_dispatcher_schedule(device);

		return;
	}

	del_timer_sync(&adreno_dev->preempt.timer);

	adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_LEVEL_STATUS, &status);

	trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb,
		status);

	/* Clean up all the bits */
	adreno_dev->prev_rb = adreno_dev->cur_rb;
	adreno_dev->cur_rb = adreno_dev->next_rb;
	adreno_dev->next_rb = NULL;

	/* Update the wptr for the new command queue */
	_update_wptr(adreno_dev, true);

	/* Update the dispatcher timer for the new command queue */
	mod_timer(&adreno_dev->dispatcher.timer,
		adreno_dev->cur_rb->dispatch_q.expires);

	/* Clear the preempt state */
	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
}

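/*
 * _a6xx_preemption_fault() - Handle a preemption that did not complete within
 * the timeout: re-check the status in case it finished late, otherwise log the
 * ringbuffer state and set a preemption fault.
 */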
static void _a6xx_preemption_fault(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status;

	/*
	 * If the power is on check the preemption status one more time - if it
	 * was successful then just transition to the complete state
	 */
	if (kgsl_state_is_awake(device)) {
		adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);

		if (status == 0) {
			adreno_set_preempt_state(adreno_dev,
				ADRENO_PREEMPT_COMPLETE);

			adreno_dispatcher_schedule(device);
			return;
		}
	}

	KGSL_DRV_ERR(device,
		"Preemption timed out: cur=%d R/W=%X/%X, next=%d R/W=%X/%X\n",
		adreno_dev->cur_rb->id,
		adreno_get_rptr(adreno_dev->cur_rb), adreno_dev->cur_rb->wptr,
		adreno_dev->next_rb->id,
		adreno_get_rptr(adreno_dev->next_rb),
		adreno_dev->next_rb->wptr);

	adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
	adreno_dispatcher_schedule(device);
}

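/* Worker that handles a preemption fault from process context */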
static void _a6xx_preemption_worker(struct work_struct *work)
{
	struct adreno_preemption *preempt = container_of(work,
		struct adreno_preemption, work);
	struct adreno_device *adreno_dev = container_of(preempt,
		struct adreno_device, preempt);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/* Need to take the mutex to make sure that the power stays on */
	mutex_lock(&device->mutex);

	if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_FAULTED))
		_a6xx_preemption_fault(adreno_dev);

	mutex_unlock(&device->mutex);
}

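/* Timer callback that fires if a triggered preemption takes too long */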
static void _a6xx_preemption_timer(unsigned long data)
{
	struct adreno_device *adreno_dev = (struct adreno_device *) data;

	/* We should only be here from a triggered state */
	if (!adreno_move_preempt_state(adreno_dev,
		ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_FAULTED))
		return;

	/* Schedule the worker to take care of the details */
	queue_work(system_unbound_wq, &adreno_dev->preempt.work);
}

/* Find the highest priority active ringbuffer */
static struct adreno_ringbuffer *a6xx_next_ringbuffer(
		struct adreno_device *adreno_dev)
{
	struct adreno_ringbuffer *rb;
	unsigned long flags;
	unsigned int i;

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		bool empty;

		spin_lock_irqsave(&rb->preempt_lock, flags);
		empty = adreno_rb_empty(rb);
		spin_unlock_irqrestore(&rb->preempt_lock, flags);

		if (!empty)
			return rb;
	}

	return NULL;
}

#define GMU_ACTIVE_STATE_RETRY_MAX 100

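/*
 * Poll the GMU RPMh power state register until the GMU firmware reports
 * GPU_HW_ACTIVE, giving up after GMU_ACTIVE_STATE_RETRY_MAX attempts.
 */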
static int adreno_gmu_wait_for_active(struct adreno_device *adreno_dev)
{
	unsigned int reg, num_retries = 0;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!kgsl_gmu_isenabled(device))
		return 0;

	kgsl_gmu_regread(device,
		A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);

	while (reg != GPU_HW_ACTIVE) {
		/* Wait for a short time before trying again */
		udelay(5);
		kgsl_gmu_regread(device,
			A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);

		if (num_retries == GMU_ACTIVE_STATE_RETRY_MAX &&
			reg != GPU_HW_ACTIVE) {
			dev_err(adreno_dev->dev.dev,
				"GMU failed to move to ACTIVE state: 0x%x\n",
				reg);
			return -ETIMEDOUT;
		}
		num_retries++;
	}

	return 0;
}

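/*
 * a6xx_preemption_trigger() - Pick the next ringbuffer to run and program the
 * CP context switch registers to preempt into it. If no switch is needed the
 * preemption state is returned to NONE.
 */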
void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
	struct adreno_ringbuffer *next;
	uint64_t ttbr0, gpuaddr;
	unsigned int contextidr, cntl;
	unsigned long flags;
	struct adreno_preemption *preempt = &adreno_dev->preempt;

	cntl = (((preempt->preempt_level << 6) & 0xC0) |
		((preempt->skipsaverestore << 9) & 0x200) |
		((preempt->usesgmem << 8) & 0x100) | 0x1);

	/* Put ourselves into a possible trigger state */
	if (!adreno_move_preempt_state(adreno_dev,
		ADRENO_PREEMPT_NONE, ADRENO_PREEMPT_START))
		return;

	/* Get the next ringbuffer to preempt in */
	next = a6xx_next_ringbuffer(adreno_dev);

	/*
	 * Nothing to do if every ringbuffer is empty or if the current
	 * ringbuffer is the only active one
	 */
	if (next == NULL || next == adreno_dev->cur_rb) {
		/*
		 * Update any critical things that might have been skipped
		 * while we were looking for a new ringbuffer
		 */

		if (next != NULL) {
			_update_wptr(adreno_dev, false);

			mod_timer(&adreno_dev->dispatcher.timer,
				adreno_dev->cur_rb->dispatch_q.expires);
		}

		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
		return;
	}

	/* Turn off the dispatcher timer */
	del_timer(&adreno_dev->dispatcher.timer);

	/*
	 * This is the most critical section - we need to take care not to race
	 * until we have programmed the CP for the switch
	 */

	spin_lock_irqsave(&next->preempt_lock, flags);

	/*
	 * Get the pagetable from the pagetable info.
	 * The pagetable_desc is allocated and mapped at probe time, and
	 * preemption_desc at init time, so no need to check if
	 * sharedmem accesses to these memdescs succeed.
	 */
	kgsl_sharedmem_readq(&next->pagetable_desc, &ttbr0,
		PT_INFO_OFFSET(ttbr0));
	kgsl_sharedmem_readl(&next->pagetable_desc, &contextidr,
		PT_INFO_OFFSET(contextidr));

	kgsl_sharedmem_writel(device, &next->preemption_desc,
		PREEMPT_RECORD(wptr), next->wptr);

	preempt->count++;

	spin_unlock_irqrestore(&next->preempt_lock, flags);

	/* And write it to the smmu info */
	kgsl_sharedmem_writeq(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(ttbr0), ttbr0);
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(context_idr), contextidr);

	kgsl_sharedmem_readq(&preempt->scratch, &gpuaddr,
		next->id * sizeof(u64));

	/*
	 * Set a keepalive bit before the first preemption register write.
	 * This is required since while each individual write to the context
	 * switch registers will wake the GPU from collapse, it will not in
	 * itself cause GPU activity. Thus, the GPU could technically be
	 * re-collapsed between subsequent register writes leading to a
	 * prolonged preemption sequence. The keepalive bit prevents any
	 * further power collapse while it is set.
	 * It is more efficient to use a keepalive+wake-on-fence approach here
	 * rather than an OOB. Both keepalive and the fence are effectively
	 * free when the GPU is already powered on, whereas an OOB requires an
	 * unconditional handshake with the GMU.
	 */
	kgsl_gmu_regrmw(device, A6XX_GMU_AO_SPARE_CNTL, 0x0, 0x2);

	/*
	 * Fenced writes on this path will make sure the GPU is woken up
	 * in case it was power collapsed by the GMU.
	 */
	adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
		lower_32_bits(next->preemption_desc.gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK);

	adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
		upper_32_bits(next->preemption_desc.gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK);

	adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
		lower_32_bits(next->secure_preemption_desc.gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK);

	adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
		upper_32_bits(next->secure_preemption_desc.gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK);

	adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
		lower_32_bits(gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK);

	adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
		upper_32_bits(gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK);

	/*
	 * The fenced writes above make sure the GMU comes out of IFPC if it
	 * was in that state, but they do not guarantee that the GMU firmware
	 * has actually moved to the ACTIVE state, i.e. that wake-up from IFPC
	 * is complete. Wait for the GMU to reach the ACTIVE state before
	 * triggering preemption, so that the CP does not interrupt the GMU
	 * while it is still waking up from IFPC.
	 */
	if (adreno_gmu_wait_for_active(adreno_dev)) {
		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);

		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
		adreno_dispatcher_schedule(device);
		return;
	}

	adreno_dev->next_rb = next;

	/* Start the timer to detect a stuck preemption */
	mod_timer(&adreno_dev->preempt.timer,
		jiffies + msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT));

	trace_adreno_preempt_trigger(adreno_dev->cur_rb, adreno_dev->next_rb,
		cntl);

	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);

	/* Trigger the preemption */
	adreno_gmu_fenced_write(adreno_dev, ADRENO_REG_CP_PREEMPT, cntl,
		FENCE_STATUS_WRITEDROPPED1_MASK);
}

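/*
 * a6xx_preemption_callback() - Called from the interrupt path when the CP
 * signals that a context switch is done. Finishes the switch to the new
 * ringbuffer and immediately tries to trigger the next preemption.
 */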
void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit)
{
	unsigned int status;

	if (!adreno_move_preempt_state(adreno_dev,
		ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_PENDING))
		return;

	adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);

	if (status & 0x1) {
		KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
			"preempt interrupt with non-zero status: %X\n", status);

		/*
		 * Under the assumption that this is a race between the
		 * interrupt and the register, schedule the worker to clean up.
		 * If the status still hasn't resolved itself by the time we
		 * get there, then we have to assume something bad happened.
		 */
		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE);
		adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
		return;
	}

	/*
	 * We can now safely clear the preemption keepalive bit, allowing
	 * power collapse to resume its regular activity.
	 */
	kgsl_gmu_regrmw(KGSL_DEVICE(adreno_dev), A6XX_GMU_AO_SPARE_CNTL, 0x2,
		0x0);

	del_timer(&adreno_dev->preempt.timer);

	adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_LEVEL_STATUS, &status);

	trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb,
		status);

	adreno_dev->prev_rb = adreno_dev->cur_rb;
	adreno_dev->cur_rb = adreno_dev->next_rb;
	adreno_dev->next_rb = NULL;

	/* Update the wptr if it changed while preemption was ongoing */
	_update_wptr(adreno_dev, true);

	/* Update the dispatcher timer for the new command queue */
	mod_timer(&adreno_dev->dispatcher.timer,
		adreno_dev->cur_rb->dispatch_q.expires);

	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);

	a6xx_preemption_trigger(adreno_dev);
}

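/*
 * a6xx_preemption_schedule() - Finish a completed preemption, if any, and
 * attempt to trigger a new one.
 */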
void a6xx_preemption_schedule(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!adreno_is_preemption_enabled(adreno_dev))
		return;

	mutex_lock(&device->mutex);

	if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE))
		_a6xx_preemption_done(adreno_dev);

	a6xx_preemption_trigger(adreno_dev);

	mutex_unlock(&device->mutex);
}

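/*
 * a6xx_preemption_pre_ibsubmit() - Emit the PM4 commands that program the
 * context record addresses (and, for user contexts, the scratch entry and
 * optional KMD postamble) ahead of an IB submission. Returns the number of
 * dwords written.
 */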
unsigned int a6xx_preemption_pre_ibsubmit(
			struct adreno_device *adreno_dev,
			struct adreno_ringbuffer *rb,
			unsigned int *cmds, struct kgsl_context *context)
{
	unsigned int *cmds_orig = cmds;
	uint64_t gpuaddr = 0;

	if (context) {
		gpuaddr = context->user_ctxt_record->memdesc.gpuaddr;
		*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 15);
	} else {
		*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 12);
	}

	/* NULL SMMU_INFO buffer - we track in KMD */
	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO;
	cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);

	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_NON_SECURE_SAVE_ADDR;
	cmds += cp_gpuaddr(adreno_dev, cmds, rb->preemption_desc.gpuaddr);

	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR;
	cmds += cp_gpuaddr(adreno_dev, cmds,
		rb->secure_preemption_desc.gpuaddr);

	if (context) {
		*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_NON_PRIV_SAVE_ADDR;
		cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr);
	}

	/*
	 * There is no need to reprogram this address when preemption is
	 * actually triggered: the CP stores the address supplied here in the
	 * CP_SET_PSEUDO_REGISTER payload into the context record, so it knows
	 * where to restore the saved perfcounters for the new ringbuffer from.
	 */
	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_COUNTER;
	cmds += cp_gpuaddr(adreno_dev, cmds,
		rb->perfcounter_save_restore_desc.gpuaddr);

	if (context) {
		struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
		struct adreno_ringbuffer *rb = drawctxt->rb;
		uint64_t dest = PREEMPT_SCRATCH_ADDR(adreno_dev, rb->id);

		*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 2);
		cmds += cp_gpuaddr(adreno_dev, cmds, dest);
		*cmds++ = lower_32_bits(gpuaddr);
		*cmds++ = upper_32_bits(gpuaddr);

		/*
		 * Add a KMD postamble to clear the perf counters during
		 * preemption
		 */
		if (!adreno_dev->perfcounter) {
			u64 kmd_postamble_addr = SCRATCH_POSTAMBLE_ADDR
						(KGSL_DEVICE(adreno_dev));

			*cmds++ = cp_type7_packet(CP_SET_AMBLE, 3);
			*cmds++ = lower_32_bits(kmd_postamble_addr);
			*cmds++ = upper_32_bits(kmd_postamble_addr);
			*cmds++ = ((CP_KMD_AMBLE_TYPE << 20) & GENMASK(22, 20))
				| (adreno_dev->preempt.postamble_len &
					GENMASK(19, 0));
		}
	}

	return (unsigned int) (cmds - cmds_orig);
}

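/*
 * a6xx_preemption_post_ibsubmit() - Emit the PM4 commands that clear the
 * scratch entry for the current ringbuffer and allow the CP to yield after an
 * IB submission. Returns the number of dwords written.
 */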
unsigned int a6xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
	unsigned int *cmds)
{
	unsigned int *cmds_orig = cmds;
	struct adreno_ringbuffer *rb = adreno_dev->cur_rb;

	if (rb) {
		uint64_t dest = PREEMPT_SCRATCH_ADDR(adreno_dev, rb->id);

		*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 2);
		cmds += cp_gpuaddr(adreno_dev, cmds, dest);
		*cmds++ = 0;
		*cmds++ = 0;
	}

	*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
	cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
	*cmds++ = 1;
	*cmds++ = 0;

	return (unsigned int) (cmds - cmds_orig);
}

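/*
 * a6xx_preemption_start() - Reset the preemption state and reinitialize the
 * SMMU info and per-ringbuffer preemption records when the GPU starts up.
 */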
void a6xx_preemption_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
	struct adreno_ringbuffer *rb;
	unsigned int i;

	if (!adreno_is_preemption_enabled(adreno_dev))
		return;

	/* Force the state to be clear */
	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);

	/* smmu_info is allocated and mapped in a6xx_preemption_iommu_init */
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(magic), A6XX_CP_SMMU_INFO_MAGIC_REF);
	kgsl_sharedmem_writeq(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(ttbr0), MMU_DEFAULT_TTBR0(device));

	/* The CP doesn't use the asid record, so poison it */
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(asid), 0xDECAFBAD);
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(context_idr),
		MMU_DEFAULT_CONTEXTIDR(device));

	adreno_writereg64(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
		ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
		iommu->smmu_info.gpuaddr);

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		/*
		 * preemption_desc is allocated and mapped at init time,
		 * so no need to check sharedmem_writel return value
		 */
		kgsl_sharedmem_writel(device, &rb->preemption_desc,
			PREEMPT_RECORD(rptr), 0);
		kgsl_sharedmem_writel(device, &rb->preemption_desc,
			PREEMPT_RECORD(wptr), 0);

		adreno_ringbuffer_set_pagetable(rb,
			device->mmu.defaultpagetable);
	}
}

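/*
 * a6xx_preemption_ringbuffer_init() - Allocate the preemption context records
 * (normal, secure and perfcounter save/restore) for a ringbuffer and seed the
 * preemption record with its default values.
 */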
static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
	struct adreno_ringbuffer *rb, uint64_t counteraddr)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	ret = kgsl_allocate_global(device, &rb->preemption_desc,
		A6XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED,
		"preemption_desc");
	if (ret)
		return ret;

	ret = kgsl_allocate_user(device, &rb->secure_preemption_desc,
		A6XX_CP_CTXRECORD_SIZE_IN_BYTES,
		KGSL_MEMFLAGS_SECURE | KGSL_MEMDESC_PRIVILEGED);
	if (ret)
		return ret;

	ret = kgsl_iommu_map_global_secure_pt_entry(device,
		&rb->secure_preemption_desc);
	if (ret)
		return ret;

	ret = kgsl_allocate_global(device, &rb->perfcounter_save_restore_desc,
		A6XX_CP_PERFCOUNTER_SAVE_RESTORE_SIZE, 0,
		KGSL_MEMDESC_PRIVILEGED, "perfcounter_save_restore_desc");
	if (ret)
		return ret;

	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(magic), A6XX_CP_CTXRECORD_MAGIC_REF);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(info), 0);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(data), 0);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(cntl), A6XX_CP_RB_CNTL_DEFAULT);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(rptr), 0);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(wptr), 0);
	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
		PREEMPT_RECORD(rptr_addr), SCRATCH_RPTR_GPU_ADDR(device,
		rb->id));
	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
		PREEMPT_RECORD(rbase), rb->buffer_desc.gpuaddr);
	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
		PREEMPT_RECORD(counter), counteraddr);

	return 0;
}

#ifdef CONFIG_QCOM_KGSL_IOMMU
static int a6xx_preemption_iommu_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);

	/* Allocate mem for storing preemption smmu record */
	return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE,
		KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED,
		"smmu_info");
}

static void a6xx_preemption_iommu_close(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);

	kgsl_free_global(device, &iommu->smmu_info);
}
#else
static int a6xx_preemption_iommu_init(struct adreno_device *adreno_dev)
{
	return -ENODEV;
}

static void a6xx_preemption_iommu_close(struct adreno_device *adreno_dev)
{
}
#endif

static void a6xx_preemption_close(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_preemption *preempt = &adreno_dev->preempt;
	struct adreno_ringbuffer *rb;
	unsigned int i;

	del_timer(&preempt->timer);
	kgsl_free_global(device, &preempt->scratch);
	a6xx_preemption_iommu_close(adreno_dev);

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		kgsl_free_global(device, &rb->preemption_desc);
		kgsl_free_global(device, &rb->perfcounter_save_restore_desc);
		kgsl_iommu_unmap_global_secure_pt_entry(device,
			&rb->secure_preemption_desc);
		kgsl_sharedmem_free(&rb->secure_preemption_desc);
	}
}

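/*
 * a6xx_preemption_init() - One-time preemption setup: allocate the scratch
 * buffer, the per-ringbuffer context records, the optional KMD postamble and
 * the SMMU info record.
 */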
int a6xx_preemption_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_preemption *preempt = &adreno_dev->preempt;
	struct adreno_ringbuffer *rb;
	int ret;
	unsigned int i;
	uint64_t addr;

	/* We are dependent on IOMMU to make preemption go on the CP side */
	if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
		return -ENODEV;

	INIT_WORK(&preempt->work, _a6xx_preemption_worker);

	setup_timer(&preempt->timer, _a6xx_preemption_timer,
		(unsigned long) adreno_dev);

	/*
	 * Allocate a scratch buffer to hold the table below:
	 * Offset: What
	 * 0x0: Context record address
	 * 0x10: Preemption counters
	 */
	ret = kgsl_allocate_global(device, &preempt->scratch, PAGE_SIZE, 0, 0,
		"preemption_scratch");
	if (ret)
		goto err;

	addr = preempt->scratch.gpuaddr +
		KGSL_PRIORITY_MAX_RB_LEVELS * sizeof(u64);

	/* Allocate mem for storing preemption switch record */
	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		ret = a6xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
		if (ret)
			goto err;

		addr += A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
	}

	/*
	 * First 28 dwords of the device scratch buffer are used to store
	 * shadow rb data. Reserve 11 dwords in the device scratch buffer
	 * from SCRATCH_POSTAMBLE_OFFSET for KMD postamble pm4 packets.
	 * This should be in *device->scratch* so that userspace cannot
	 * access it.
	 */
	if (!adreno_dev->perfcounter) {
		u32 *postamble = device->scratch.hostptr +
			SCRATCH_POSTAMBLE_OFFSET;
		u32 count = 0;

		postamble[count++] = cp_type7_packet(CP_REG_RMW, 3);
		postamble[count++] = A6XX_RBBM_PERFCTR_SRAM_INIT_CMD;
		postamble[count++] = 0x0;
		postamble[count++] = 0x1;

		postamble[count++] = cp_type7_packet(CP_WAIT_REG_MEM, 6);
		postamble[count++] = 0x3;
		postamble[count++] = A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS;
		postamble[count++] = 0x0;
		postamble[count++] = 0x1;
		postamble[count++] = 0x1;
		postamble[count++] = 0x0;

		preempt->postamble_len = count;
	}

	ret = a6xx_preemption_iommu_init(adreno_dev);

err:
	if (ret)
		a6xx_preemption_close(device);

	return ret;
}

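/*
 * a6xx_preemption_context_destroy() - Free the user context record that was
 * allocated in a6xx_preemption_context_init().
 */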
void a6xx_preemption_context_destroy(struct kgsl_context *context)
{
	struct kgsl_device *device = context->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
		return;

	gpumem_free_entry(context->user_ctxt_record);

	/* Put the extra ref from gpumem_alloc_entry() */
	kgsl_mem_entry_put(context->user_ctxt_record);
}

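/*
 * a6xx_preemption_context_init() - Allocate the per-context user context
 * record used by the CP to save and restore non-privileged context state
 * across preemption.
 */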
int a6xx_preemption_context_init(struct kgsl_context *context)
{
	struct kgsl_device *device = context->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	uint64_t flags = 0;

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
		return 0;

	if (context->flags & KGSL_CONTEXT_SECURE)
		flags |= KGSL_MEMFLAGS_SECURE;

	if (kgsl_is_compat_task())
		flags |= KGSL_MEMFLAGS_FORCE_32BIT;

	/*
	 * gpumem_alloc_entry takes an extra refcount. Put it only when
	 * destroying the context to keep the context record valid
	 */
	context->user_ctxt_record = gpumem_alloc_entry(context->dev_priv,
		A6XX_CP_CTXRECORD_USER_RESTORE_SIZE, flags);
	if (IS_ERR(context->user_ctxt_record)) {
		int ret = PTR_ERR(context->user_ctxt_record);

		context->user_ctxt_record = NULL;
		return ret;
	}

	return 0;
}