/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "adreno.h"
#include "adreno_a6xx.h"
#include "a6xx_reg.h"
#include "adreno_trace.h"
#include "adreno_pm4types.h"
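/*
 * Byte offsets of fields within the preemption context record and the
 * SMMU info record that are shared with the CP.
 */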
#define PREEMPT_RECORD(_field) \
		offsetof(struct a6xx_cp_preemption_record, _field)

#define PREEMPT_SMMU_RECORD(_field) \
		offsetof(struct a6xx_cp_smmu_info, _field)
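/*
 * Pseudo register identifiers used with the CP_SET_PSEUDO_REGISTER packet.
 * In the packet payload each identifier is followed by the 64-bit GPU
 * address of the corresponding save/restore buffer (see
 * a6xx_preemption_pre_ibsubmit()).
 */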
enum {
	SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO = 0,
	SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_NON_SECURE_SAVE_ADDR,
	SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR,
	SET_PSEUDO_REGISTER_SAVE_REGISTER_NON_PRIV_SAVE_ADDR,
	SET_PSEUDO_REGISTER_SAVE_REGISTER_COUNTER,
};
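/*
 * Sync the hardware ringbuffer write pointer with the software copy for the
 * current ringbuffer. Outside of interrupt context an OOB handshake with the
 * GMU keeps the GPU awake while the registers are accessed, since register
 * fences do not wake the GPU on reads.
 */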
static void _update_wptr(struct adreno_device *adreno_dev, bool reset_timer)
{
	struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
	unsigned int wptr;
	unsigned long flags;
	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	/*
	 * Make sure the GPU is up before we read the WPTR, since the fence
	 * doesn't wake the GPU on read operations.
	 */
	if (in_interrupt() == 0) {
		int status;

		if (gpudev->oob_set) {
			status = gpudev->oob_set(adreno_dev,
				OOB_PREEMPTION_SET_MASK,
				OOB_PREEMPTION_CHECK_MASK,
				OOB_PREEMPTION_CLEAR_MASK);
			if (status)
				return;
		}
	}

	spin_lock_irqsave(&rb->preempt_lock, flags);

	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);

	if (wptr != rb->wptr) {
		adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
			rb->wptr);
		/*
		 * In case something got submitted while preemption was
		 * ongoing, reset the timer.
		 */
		reset_timer = true;
	}

	if (reset_timer)
		rb->dispatch_q.expires = jiffies +
			msecs_to_jiffies(adreno_drawobj_timeout);

	spin_unlock_irqrestore(&rb->preempt_lock, flags);

	if (in_interrupt() == 0) {
		if (gpudev->oob_clear)
			gpudev->oob_clear(adreno_dev,
				OOB_PREEMPTION_CLEAR_MASK);
	}
}

static inline bool adreno_move_preempt_state(struct adreno_device *adreno_dev,
	enum adreno_preempt_states old, enum adreno_preempt_states new)
{
	return (atomic_cmpxchg(&adreno_dev->preempt.state, old, new) == old);
}

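/*
 * Called from the dispatcher once the CP has signalled that a triggered
 * preemption finished: check that CP_PREEMPT no longer reports a switch in
 * flight, then retire the old ringbuffer and move bookkeeping to the new one.
 */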
static void _a6xx_preemption_done(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status;

	/*
	 * In the very unlikely case that the power is off, do nothing - the
	 * state will be reset on power up and everybody will be happy
	 */

	if (!kgsl_state_is_awake(device))
		return;

	adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);

	if (status & 0x1) {
		KGSL_DRV_ERR(device,
			"Preemption not complete: status=%X cur=%d R/W=%X/%X next=%d R/W=%X/%X\n",
			status, adreno_dev->cur_rb->id,
			adreno_get_rptr(adreno_dev->cur_rb),
			adreno_dev->cur_rb->wptr, adreno_dev->next_rb->id,
			adreno_get_rptr(adreno_dev->next_rb),
			adreno_dev->next_rb->wptr);

		/* Set a fault and restart */
		adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
		adreno_dispatcher_schedule(device);

		return;
	}

	del_timer_sync(&adreno_dev->preempt.timer);

	adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_LEVEL_STATUS, &status);

	trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb,
		status);

	/* Clean up all the bits */
	adreno_dev->prev_rb = adreno_dev->cur_rb;
	adreno_dev->cur_rb = adreno_dev->next_rb;
	adreno_dev->next_rb = NULL;

	/* Update the wptr for the new command queue */
	_update_wptr(adreno_dev, true);

	/* Update the dispatcher timer for the new command queue */
	mod_timer(&adreno_dev->dispatcher.timer,
		adreno_dev->cur_rb->dispatch_q.expires);

	/* Clear the preempt state */
	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
}

static void _a6xx_preemption_fault(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status;

	/*
	 * If the power is on check the preemption status one more time - if it
	 * was successful then just transition to the complete state
	 */
	if (kgsl_state_is_awake(device)) {
		adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);

		if (status == 0) {
			adreno_set_preempt_state(adreno_dev,
				ADRENO_PREEMPT_COMPLETE);

			adreno_dispatcher_schedule(device);
			return;
		}
	}

	KGSL_DRV_ERR(device,
		"Preemption timed out: cur=%d R/W=%X/%X, next=%d R/W=%X/%X\n",
		adreno_dev->cur_rb->id,
		adreno_get_rptr(adreno_dev->cur_rb), adreno_dev->cur_rb->wptr,
		adreno_dev->next_rb->id,
		adreno_get_rptr(adreno_dev->next_rb),
		adreno_dev->next_rb->wptr);

	adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
	adreno_dispatcher_schedule(device);
}

static void _a6xx_preemption_worker(struct work_struct *work)
{
	struct adreno_preemption *preempt = container_of(work,
		struct adreno_preemption, work);
	struct adreno_device *adreno_dev = container_of(preempt,
		struct adreno_device, preempt);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/* Need to take the mutex to make sure that the power stays on */
	mutex_lock(&device->mutex);

	if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_FAULTED))
		_a6xx_preemption_fault(adreno_dev);

	mutex_unlock(&device->mutex);
}

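/*
 * Watchdog for a triggered preemption: if the CP has not signalled
 * completion within ADRENO_PREEMPT_TIMEOUT, mark the state as faulted and
 * let the worker sort out the details.
 */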
static void _a6xx_preemption_timer(unsigned long data)
{
	struct adreno_device *adreno_dev = (struct adreno_device *) data;

	/* We should only be here from a triggered state */
	if (!adreno_move_preempt_state(adreno_dev,
		ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_FAULTED))
		return;

	/* Schedule the worker to take care of the details */
	queue_work(system_unbound_wq, &adreno_dev->preempt.work);
}

/* Find the highest priority active ringbuffer */
static struct adreno_ringbuffer *a6xx_next_ringbuffer(
		struct adreno_device *adreno_dev)
{
	struct adreno_ringbuffer *rb;
	unsigned long flags;
	unsigned int i;

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		bool empty;

		spin_lock_irqsave(&rb->preempt_lock, flags);
		empty = adreno_rb_empty(rb);
		spin_unlock_irqrestore(&rb->preempt_lock, flags);

		if (empty == false)
			return rb;
	}

	return NULL;
}

void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
	struct adreno_ringbuffer *next;
	uint64_t ttbr0, gpuaddr;
	unsigned int contextidr, cntl;
	unsigned long flags;
	struct adreno_preemption *preempt = &adreno_dev->preempt;

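	/*
	 * Build the CP_PREEMPT control word: bits [7:6] carry the preemption
	 * level, bit 8 the usesgmem flag and bit 9 the skipsaverestore flag;
	 * bit 0 kicks off the switch itself (the same bit is polled later to
	 * detect completion).
	 */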
	cntl = (((preempt->preempt_level << 6) & 0xC0) |
		((preempt->skipsaverestore << 9) & 0x200) |
		((preempt->usesgmem << 8) & 0x100) | 0x1);

	/* Put ourselves into a possible trigger state */
	if (!adreno_move_preempt_state(adreno_dev,
		ADRENO_PREEMPT_NONE, ADRENO_PREEMPT_START))
		return;

	/* Get the next ringbuffer to preempt in */
	next = a6xx_next_ringbuffer(adreno_dev);

	/*
	 * Nothing to do if every ringbuffer is empty or if the current
	 * ringbuffer is the only active one
	 */
	if (next == NULL || next == adreno_dev->cur_rb) {
		/*
		 * Update any critical things that might have been skipped
		 * while we were looking for a new ringbuffer
		 */

		if (next != NULL) {
			_update_wptr(adreno_dev, false);

			mod_timer(&adreno_dev->dispatcher.timer,
				adreno_dev->cur_rb->dispatch_q.expires);
		}

		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
		return;
	}

	/* Turn off the dispatcher timer */
	del_timer(&adreno_dev->dispatcher.timer);

	/*
	 * This is the most critical section - we need to take care not to race
	 * until we have programmed the CP for the switch
	 */

	spin_lock_irqsave(&next->preempt_lock, flags);

	/*
	 * Get the pagetable from the pagetable info.
	 * The pagetable_desc is allocated and mapped at probe time, and
	 * preemption_desc at init time, so no need to check if
	 * sharedmem accesses to these memdescs succeed.
	 */
	kgsl_sharedmem_readq(&next->pagetable_desc, &ttbr0,
		PT_INFO_OFFSET(ttbr0));
	kgsl_sharedmem_readl(&next->pagetable_desc, &contextidr,
		PT_INFO_OFFSET(contextidr));

	kgsl_sharedmem_writel(device, &next->preemption_desc,
		PREEMPT_RECORD(wptr), next->wptr);

	preempt->count++;

	spin_unlock_irqrestore(&next->preempt_lock, flags);

	/* And write it to the smmu info */
	kgsl_sharedmem_writeq(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(ttbr0), ttbr0);
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(context_idr), contextidr);

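	/*
	 * Fetch the GPU address of the user context record for the incoming
	 * ringbuffer; a6xx_preemption_pre_ibsubmit() stashed it in the
	 * scratch buffer when the commands were submitted.
	 */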
	kgsl_sharedmem_readq(&device->scratch, &gpuaddr,
		SCRATCH_PREEMPTION_CTXT_RESTORE_ADDR_OFFSET(next->id));

	/*
	 * Set a keepalive bit before the first preemption register write.
	 * This is required since while each individual write to the context
	 * switch registers will wake the GPU from collapse, it will not in
	 * itself cause GPU activity. Thus, the GPU could technically be
	 * re-collapsed between subsequent register writes leading to a
	 * prolonged preemption sequence. The keepalive bit prevents any
	 * further power collapse while it is set.
	 * It is more efficient to use a keepalive+wake-on-fence approach here
	 * rather than an OOB. Both keepalive and the fence are effectively
	 * free when the GPU is already powered on, whereas an OOB requires an
	 * unconditional handshake with the GMU.
	 */
	kgsl_gmu_regrmw(device, A6XX_GMU_AO_SPARE_CNTL, 0x0, 0x2);

	/*
	 * Fenced writes on this path will make sure the GPU is woken up
	 * in case it was power collapsed by the GMU.
	 */
	adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
		lower_32_bits(next->preemption_desc.gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK);

	adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
		upper_32_bits(next->preemption_desc.gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK);

	adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
		lower_32_bits(next->secure_preemption_desc.gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK);

	adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
		upper_32_bits(next->secure_preemption_desc.gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK);

	adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
		lower_32_bits(gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK);

	adreno_gmu_fenced_write(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
		upper_32_bits(gpuaddr),
		FENCE_STATUS_WRITEDROPPED1_MASK);

	adreno_dev->next_rb = next;

	/* Start the timer to detect a stuck preemption */
	mod_timer(&adreno_dev->preempt.timer,
		jiffies + msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT));

	trace_adreno_preempt_trigger(adreno_dev->cur_rb, adreno_dev->next_rb,
		cntl);

	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);

	/* Trigger the preemption */
	adreno_gmu_fenced_write(adreno_dev, ADRENO_REG_CP_PREEMPT, cntl,
		FENCE_STATUS_WRITEDROPPED1_MASK);
}

void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit)
{
	unsigned int status;

	if (!adreno_move_preempt_state(adreno_dev,
		ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_PENDING))
		return;

	adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);

	if (status & 0x1) {
		KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
			"preempt interrupt with non-zero status: %X\n", status);

		/*
		 * Under the assumption that this is a race between the
		 * interrupt and the register, schedule the worker to clean up.
		 * If the status still hasn't resolved itself by the time we
		 * get there then we have to assume something bad happened
		 */
		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE);
		adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
		return;
	}

	/*
	 * We can now safely clear the preemption keepalive bit, allowing
	 * power collapse to resume its regular activity.
	 */
	kgsl_gmu_regrmw(KGSL_DEVICE(adreno_dev), A6XX_GMU_AO_SPARE_CNTL, 0x2,
		0x0);

	del_timer(&adreno_dev->preempt.timer);

	adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_LEVEL_STATUS, &status);

	trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb,
		status);

	adreno_dev->prev_rb = adreno_dev->cur_rb;
	adreno_dev->cur_rb = adreno_dev->next_rb;
	adreno_dev->next_rb = NULL;

	/* Update the wptr if it changed while preemption was ongoing */
	_update_wptr(adreno_dev, true);

	/* Update the dispatcher timer for the new command queue */
	mod_timer(&adreno_dev->dispatcher.timer,
		adreno_dev->cur_rb->dispatch_q.expires);

	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);

	a6xx_preemption_trigger(adreno_dev);
}

void a6xx_preemption_schedule(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!adreno_is_preemption_enabled(adreno_dev))
		return;

	mutex_lock(&device->mutex);

	if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE))
		_a6xx_preemption_done(adreno_dev);

	a6xx_preemption_trigger(adreno_dev);

	mutex_unlock(&device->mutex);
}

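/*
 * Emit the preemption preamble for a submission: a CP_SET_PSEUDO_REGISTER
 * packet pointing the CP at the SMMU info (NULLed - tracked in the KMD), the
 * privileged, secure and (for user contexts) non-privileged save addresses
 * and the perfcounter save/restore buffer, followed by a CP_MEM_WRITE that
 * stashes the user context record address in the scratch buffer for
 * a6xx_preemption_trigger() to pick up on the next switch.
 */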
unsigned int a6xx_preemption_pre_ibsubmit(
		struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb,
		unsigned int *cmds, struct kgsl_context *context)
{
	unsigned int *cmds_orig = cmds;
	uint64_t gpuaddr = 0;

	if (context) {
		gpuaddr = context->user_ctxt_record->memdesc.gpuaddr;
		*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 15);
	} else {
		*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 12);
	}

	/* NULL SMMU_INFO buffer - we track in KMD */
	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO;
	cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);

	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_NON_SECURE_SAVE_ADDR;
	cmds += cp_gpuaddr(adreno_dev, cmds, rb->preemption_desc.gpuaddr);

	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR;
	cmds += cp_gpuaddr(adreno_dev, cmds,
		rb->secure_preemption_desc.gpuaddr);

	if (context) {
		*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_NON_PRIV_SAVE_ADDR;
		cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr);
	}

	/*
	 * There is no need to program this address again when we trigger
	 * preemption: the CP stores the address given here in the
	 * CP_SET_PSEUDO_REGISTER payload into the context record and thus
	 * knows where to restore the saved perfcounters for the new
	 * ringbuffer from.
	 */
	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_COUNTER;
	cmds += cp_gpuaddr(adreno_dev, cmds,
		rb->perfcounter_save_restore_desc.gpuaddr);

	if (context) {
		struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
		struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
		struct adreno_ringbuffer *rb = drawctxt->rb;
		uint64_t dest =
			SCRATCH_PREEMPTION_CTXT_RESTORE_GPU_ADDR(device,
				rb->id);

		*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 2);
		cmds += cp_gpuaddr(adreno_dev, cmds, dest);
		*cmds++ = lower_32_bits(gpuaddr);
		*cmds++ = upper_32_bits(gpuaddr);
	}

	return (unsigned int) (cmds - cmds_orig);
}

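/*
 * Emit the preemption postamble for a submission: clear the scratch entry
 * holding the context restore address for the current ringbuffer and add a
 * CP_CONTEXT_SWITCH_YIELD packet, giving the CP a point at which it can
 * yield to a pending preemption request.
 */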
unsigned int a6xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
	unsigned int *cmds)
{
	unsigned int *cmds_orig = cmds;
	struct adreno_ringbuffer *rb = adreno_dev->cur_rb;

	if (rb) {
		struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
		uint64_t dest = SCRATCH_PREEMPTION_CTXT_RESTORE_GPU_ADDR(device,
			rb->id);

		*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 2);
		cmds += cp_gpuaddr(adreno_dev, cmds, dest);
		*cmds++ = 0;
		*cmds++ = 0;
	}

	*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
	cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
	*cmds++ = 1;
	*cmds++ = 0;

	return (unsigned int) (cmds - cmds_orig);
}

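/*
 * Reinitialize preemption state after power up or recovery: reset the
 * software state machine, seed the SMMU info record with the default
 * TTBR0/CONTEXTIDR, point the CP at it, and clear the per-ringbuffer
 * preemption records.
 */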
void a6xx_preemption_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
	struct adreno_ringbuffer *rb;
	unsigned int i;

	if (!adreno_is_preemption_enabled(adreno_dev))
		return;

	/* Force the state to be clear */
	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);

	/* smmu_info is allocated and mapped in a6xx_preemption_iommu_init */
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(magic), A6XX_CP_SMMU_INFO_MAGIC_REF);
	kgsl_sharedmem_writeq(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(ttbr0), MMU_DEFAULT_TTBR0(device));

	/* The CP doesn't use the asid record, so poison it */
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(asid), 0xDECAFBAD);
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(context_idr),
		MMU_DEFAULT_CONTEXTIDR(device));

	adreno_writereg64(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
		ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
		iommu->smmu_info.gpuaddr);

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		/*
		 * preemption_desc is allocated and mapped at init time,
		 * so no need to check sharedmem_writel return value
		 */
		kgsl_sharedmem_writel(device, &rb->preemption_desc,
			PREEMPT_RECORD(rptr), 0);
		kgsl_sharedmem_writel(device, &rb->preemption_desc,
			PREEMPT_RECORD(wptr), 0);

		adreno_ringbuffer_set_pagetable(rb,
			device->mmu.defaultpagetable);
	}
}

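/*
 * Allocate the per-ringbuffer context records (privileged, secure and
 * perfcounter save/restore) and seed the preemption record with the magic
 * value, ringbuffer base, rptr address and the counter buffer address handed
 * in by a6xx_preemption_init().
 */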
static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
	struct adreno_ringbuffer *rb, uint64_t counteraddr)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	ret = kgsl_allocate_global(device, &rb->preemption_desc,
		A6XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED,
		"preemption_desc");
	if (ret)
		return ret;

	ret = kgsl_allocate_user(device, &rb->secure_preemption_desc,
		A6XX_CP_CTXRECORD_SIZE_IN_BYTES,
		KGSL_MEMFLAGS_SECURE | KGSL_MEMDESC_PRIVILEGED);
	if (ret)
		return ret;

	ret = kgsl_iommu_map_global_secure_pt_entry(device,
		&rb->secure_preemption_desc);
	if (ret)
		return ret;

	ret = kgsl_allocate_global(device, &rb->perfcounter_save_restore_desc,
		A6XX_CP_PERFCOUNTER_SAVE_RESTORE_SIZE, 0,
		KGSL_MEMDESC_PRIVILEGED, "perfcounter_save_restore_desc");
	if (ret)
		return ret;

	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(magic), A6XX_CP_CTXRECORD_MAGIC_REF);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(info), 0);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(data), 0);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(cntl), A6XX_CP_RB_CNTL_DEFAULT);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(rptr), 0);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(wptr), 0);
	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
		PREEMPT_RECORD(rptr_addr), SCRATCH_RPTR_GPU_ADDR(device,
		rb->id));
	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
		PREEMPT_RECORD(rbase), rb->buffer_desc.gpuaddr);
	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
		PREEMPT_RECORD(counter), counteraddr);

	return 0;
}

#ifdef CONFIG_QCOM_KGSL_IOMMU
static int a6xx_preemption_iommu_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);

	/* Allocate mem for storing preemption smmu record */
	return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE,
		KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED,
		"smmu_info");
}

static void a6xx_preemption_iommu_close(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);

	kgsl_free_global(device, &iommu->smmu_info);
}
#else
static int a6xx_preemption_iommu_init(struct adreno_device *adreno_dev)
{
	return -ENODEV;
}

static void a6xx_preemption_iommu_close(struct adreno_device *adreno_dev)
{
}
#endif

static void a6xx_preemption_close(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_preemption *preempt = &adreno_dev->preempt;
	struct adreno_ringbuffer *rb;
	unsigned int i;

	del_timer(&preempt->timer);
	kgsl_free_global(device, &preempt->counters);
	a6xx_preemption_iommu_close(adreno_dev);

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		kgsl_free_global(device, &rb->preemption_desc);
		kgsl_free_global(device, &rb->perfcounter_save_restore_desc);
		kgsl_iommu_unmap_global_secure_pt_entry(device,
			&rb->secure_preemption_desc);
		kgsl_sharedmem_free(&rb->secure_preemption_desc);
	}
}

int a6xx_preemption_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_preemption *preempt = &adreno_dev->preempt;
	struct adreno_ringbuffer *rb;
	int ret;
	unsigned int i;
	uint64_t addr;

	/* Preemption on the CP side depends on an IOMMU-backed MMU */
	if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
		return -ENODEV;

	INIT_WORK(&preempt->work, _a6xx_preemption_worker);

	setup_timer(&preempt->timer, _a6xx_preemption_timer,
		(unsigned long) adreno_dev);

	/* Allocate mem for storing preemption counters */
	ret = kgsl_allocate_global(device, &preempt->counters,
		adreno_dev->num_ringbuffers *
		A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0,
		"preemption_counters");
	if (ret)
		goto err;

	addr = preempt->counters.gpuaddr;

	/* Allocate mem for storing preemption switch record */
	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		ret = a6xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
		if (ret)
			goto err;

		addr += A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
	}

	ret = a6xx_preemption_iommu_init(adreno_dev);

err:
	if (ret)
		a6xx_preemption_close(device);

	return ret;
}

void a6xx_preemption_context_destroy(struct kgsl_context *context)
{
	struct kgsl_device *device = context->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!adreno_is_preemption_enabled(adreno_dev))
		return;

	gpumem_free_entry(context->user_ctxt_record);

	/* Put the extra ref from gpumem_alloc_entry() */
	kgsl_mem_entry_put(context->user_ctxt_record);
}

int a6xx_preemption_context_init(struct kgsl_context *context)
{
	struct kgsl_device *device = context->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	uint64_t flags = 0;

	if (!adreno_is_preemption_enabled(adreno_dev))
		return 0;

	if (context->flags & KGSL_CONTEXT_SECURE)
		flags |= KGSL_MEMFLAGS_SECURE;

	/*
	 * gpumem_alloc_entry takes an extra refcount. Put it only when
	 * destroying the context to keep the context record valid
	 */
	context->user_ctxt_record = gpumem_alloc_entry(context->dev_priv,
		A6XX_CP_CTXRECORD_USER_RESTORE_SIZE, flags);
	if (IS_ERR(context->user_ctxt_record)) {
		int ret = PTR_ERR(context->user_ctxt_record);

		context->user_ctxt_record = NULL;
		return ret;
	}

	return 0;
}