/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "adreno.h"
#include "adreno_a6xx.h"
#include "a6xx_reg.h"
#include "adreno_trace.h"
#include "adreno_pm4types.h"

#define PREEMPT_RECORD(_field) \
		offsetof(struct a6xx_cp_preemption_record, _field)

#define PREEMPT_SMMU_RECORD(_field) \
		offsetof(struct a6xx_cp_smmu_info, _field)

enum {
	SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO = 0,
	SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_NON_SECURE_SAVE_ADDR,
	SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR,
	SET_PSEUDO_REGISTER_SAVE_REGISTER_NON_PRIV_SAVE_ADDR,
	SET_PSEUDO_REGISTER_SAVE_REGISTER_COUNTER,
};

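/*
 * _update_wptr() - Resync the hardware write pointer with the current
 * ringbuffer. If new commands were queued to the software wptr while
 * preemption was in flight, write the new value to the CP and (re)arm the
 * dispatch queue timeout.
 */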
static void _update_wptr(struct adreno_device *adreno_dev, bool reset_timer)
{
	struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
	unsigned int wptr;
	unsigned long flags;

	spin_lock_irqsave(&rb->preempt_lock, flags);

	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);

	if (wptr != rb->wptr) {
		adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
			rb->wptr);
		/*
		 * In case something got submitted while preemption was
		 * ongoing, reset the timer.
		 */
		reset_timer = true;
	}

	if (reset_timer)
		rb->dispatch_q.expires = jiffies +
			msecs_to_jiffies(adreno_drawobj_timeout);

	spin_unlock_irqrestore(&rb->preempt_lock, flags);
}

static inline bool adreno_move_preempt_state(struct adreno_device *adreno_dev,
	enum adreno_preempt_states old, enum adreno_preempt_states new)
{
	return (atomic_cmpxchg(&adreno_dev->preempt.state, old, new) == old);
}

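/*
 * _a6xx_preemption_done() - Final bookkeeping for a completed preemption.
 * Called from the dispatcher once the state machine reaches COMPLETE: verify
 * that the CP has actually finished (CP_PREEMPT bit 0 clear), fault if not,
 * otherwise swap cur_rb/next_rb and restart the timers.
 */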
static void _a6xx_preemption_done(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status;

	/*
	 * In the very unlikely case that the power is off, do nothing - the
	 * state will be reset on power up and everybody will be happy
	 */

	if (!kgsl_state_is_awake(device))
		return;

	adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);

	if (status & 0x1) {
		KGSL_DRV_ERR(device,
			"Preemption not complete: status=%X cur=%d R/W=%X/%X next=%d R/W=%X/%X\n",
			status, adreno_dev->cur_rb->id,
			adreno_get_rptr(adreno_dev->cur_rb),
			adreno_dev->cur_rb->wptr, adreno_dev->next_rb->id,
			adreno_get_rptr(adreno_dev->next_rb),
			adreno_dev->next_rb->wptr);

		/* Set a fault and restart */
		adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
		adreno_dispatcher_schedule(device);

		return;
	}

	del_timer_sync(&adreno_dev->preempt.timer);

	trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb);

	/* Clean up all the bits */
	adreno_dev->prev_rb = adreno_dev->cur_rb;
	adreno_dev->cur_rb = adreno_dev->next_rb;
	adreno_dev->next_rb = NULL;

	/* Update the wptr for the new command queue */
	_update_wptr(adreno_dev, true);

	/* Update the dispatcher timer for the new command queue */
	mod_timer(&adreno_dev->dispatcher.timer,
		adreno_dev->cur_rb->dispatch_q.expires);

	/* Clear the preempt state */
	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
}

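/*
 * _a6xx_preemption_fault() - Handle a preemption that did not complete within
 * ADRENO_PREEMPT_TIMEOUT. Re-check the CP status once in case it finished
 * late; otherwise log the ringbuffer state and raise a preemption fault so
 * the dispatcher can recover.
 */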
static void _a6xx_preemption_fault(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status;

	/*
	 * If the power is on, check the preemption status one more time - if
	 * it was successful then just transition to the complete state
	 */
	if (kgsl_state_is_awake(device)) {
		adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);

		if (status == 0) {
			adreno_set_preempt_state(adreno_dev,
				ADRENO_PREEMPT_COMPLETE);

			adreno_dispatcher_schedule(device);
			return;
		}
	}

	KGSL_DRV_ERR(device,
		"Preemption timed out: cur=%d R/W=%X/%X, next=%d R/W=%X/%X\n",
		adreno_dev->cur_rb->id,
		adreno_get_rptr(adreno_dev->cur_rb), adreno_dev->cur_rb->wptr,
		adreno_dev->next_rb->id,
		adreno_get_rptr(adreno_dev->next_rb),
		adreno_dev->next_rb->wptr);

	adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
	adreno_dispatcher_schedule(device);
}

static void _a6xx_preemption_worker(struct work_struct *work)
{
	struct adreno_preemption *preempt = container_of(work,
		struct adreno_preemption, work);
	struct adreno_device *adreno_dev = container_of(preempt,
		struct adreno_device, preempt);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/* Need to take the mutex to make sure that the power stays on */
	mutex_lock(&device->mutex);

	if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_FAULTED))
		_a6xx_preemption_fault(adreno_dev);

	mutex_unlock(&device->mutex);
}

static void _a6xx_preemption_timer(unsigned long data)
{
	struct adreno_device *adreno_dev = (struct adreno_device *) data;

	/* We should only be here from a triggered state */
	if (!adreno_move_preempt_state(adreno_dev,
		ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_FAULTED))
		return;

	/* Schedule the worker to take care of the details */
	queue_work(system_unbound_wq, &adreno_dev->preempt.work);
}

/* Find the highest priority active ringbuffer */
static struct adreno_ringbuffer *a6xx_next_ringbuffer(
		struct adreno_device *adreno_dev)
{
	struct adreno_ringbuffer *rb;
	unsigned long flags;
	unsigned int i;

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		bool empty;

		spin_lock_irqsave(&rb->preempt_lock, flags);
		empty = adreno_rb_empty(rb);
		spin_unlock_irqrestore(&rb->preempt_lock, flags);

		if (empty == false)
			return rb;
	}

	return NULL;
}

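/*
 * a6xx_preemption_trigger() - Start a preemption to the highest priority
 * non-empty ringbuffer. Programs the SMMU info and context switch restore
 * addresses for the incoming ringbuffer, arms the stuck-preemption timer and
 * then writes CP_PREEMPT to kick off the switch.
 */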
void a6xx_preemption_trigger(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
	struct adreno_ringbuffer *next;
	uint64_t ttbr0;
	unsigned int contextidr;
	unsigned long flags;
	uint32_t preempt_level, usesgmem, skipsaverestore;

	preempt_level = adreno_dev->preempt_level;
	usesgmem = adreno_dev->usesgmem;
	skipsaverestore = adreno_dev->skipsaverestore;

	/* Put ourselves into a possible trigger state */
	if (!adreno_move_preempt_state(adreno_dev,
		ADRENO_PREEMPT_NONE, ADRENO_PREEMPT_START))
		return;

	/* Get the next ringbuffer to preempt in */
	next = a6xx_next_ringbuffer(adreno_dev);

	/*
	 * Nothing to do if every ringbuffer is empty or if the current
	 * ringbuffer is the only active one
	 */
	if (next == NULL || next == adreno_dev->cur_rb) {
		/*
		 * Update any critical things that might have been skipped while
		 * we were looking for a new ringbuffer
		 */

		if (next != NULL) {
			_update_wptr(adreno_dev, false);

			mod_timer(&adreno_dev->dispatcher.timer,
				adreno_dev->cur_rb->dispatch_q.expires);
		}

		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
		return;
	}

	/* Turn off the dispatcher timer */
	del_timer(&adreno_dev->dispatcher.timer);

	/*
	 * This is the most critical section - we need to take care not to race
	 * until we have programmed the CP for the switch
	 */

	spin_lock_irqsave(&next->preempt_lock, flags);

	/*
	 * Get the pagetable from the pagetable info.
	 * The pagetable_desc is allocated and mapped at probe time, and
	 * preemption_desc at init time, so no need to check if
	 * sharedmem accesses to these memdescs succeed.
	 */
	kgsl_sharedmem_readq(&next->pagetable_desc, &ttbr0,
		PT_INFO_OFFSET(ttbr0));
	kgsl_sharedmem_readl(&next->pagetable_desc, &contextidr,
		PT_INFO_OFFSET(contextidr));

	kgsl_sharedmem_writel(device, &next->preemption_desc,
		PREEMPT_RECORD(wptr), next->wptr);

	spin_unlock_irqrestore(&next->preempt_lock, flags);

	/* And write it to the smmu info */
	kgsl_sharedmem_writeq(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(ttbr0), ttbr0);
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(context_idr), contextidr);

	kgsl_regwrite(device,
		A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
		lower_32_bits(next->preemption_desc.gpuaddr));
	kgsl_regwrite(device,
		A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
		upper_32_bits(next->preemption_desc.gpuaddr));

	if (next->drawctxt_active) {
		struct kgsl_context *context = &next->drawctxt_active->base;
		uint64_t gpuaddr = context->user_ctxt_record->memdesc.gpuaddr;

		kgsl_regwrite(device,
			A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
			lower_32_bits(gpuaddr));
		kgsl_regwrite(device,
			A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
			upper_32_bits(gpuaddr));
	}

	adreno_dev->next_rb = next;

	/* Start the timer to detect a stuck preemption */
	mod_timer(&adreno_dev->preempt.timer,
		jiffies + msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT));

	trace_adreno_preempt_trigger(adreno_dev->cur_rb, adreno_dev->next_rb);

	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);

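	/*
	 * CP_PREEMPT trigger word, as encoded below: bit 0 is the "go" bit,
	 * bits [7:6] carry the preemption level, bit 8 sets usesgmem and
	 * bit 9 sets skipsaverestore.
	 */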
	/* Trigger the preemption */
	adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT,
		((preempt_level << 6) & 0xC0) |
		((skipsaverestore << 9) & 0x200) |
		((usesgmem << 8) & 0x100) | 0x1);
}

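/*
 * a6xx_preemption_callback() - Preemption interrupt handler. Confirms that
 * the CP has finished the switch, promotes next_rb to cur_rb and immediately
 * tries to trigger the next pending preemption.
 */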
void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit)
{
	unsigned int status;

	if (!adreno_move_preempt_state(adreno_dev,
		ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_PENDING))
		return;

	adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);

	if (status & 0x1) {
		KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
			"preempt interrupt with non-zero status: %X\n", status);

		/*
		 * Under the assumption that this is a race between the
		 * interrupt and the register, schedule the worker to clean up.
		 * If the status still hasn't resolved itself by the time we get
		 * there then we have to assume something bad happened
		 */
		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE);
		adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
		return;
	}

	del_timer(&adreno_dev->preempt.timer);

	trace_adreno_preempt_done(adreno_dev->cur_rb,
		adreno_dev->next_rb);

	adreno_dev->prev_rb = adreno_dev->cur_rb;
	adreno_dev->cur_rb = adreno_dev->next_rb;
	adreno_dev->next_rb = NULL;

	/* Update the wptr if it changed while preemption was ongoing */
	_update_wptr(adreno_dev, true);

	/* Update the dispatcher timer for the new command queue */
	mod_timer(&adreno_dev->dispatcher.timer,
		adreno_dev->cur_rb->dispatch_q.expires);

	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);

	a6xx_preemption_trigger(adreno_dev);
}

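/*
 * a6xx_preemption_schedule() - Dispatcher entry point: finish any preemption
 * that has completed and then attempt to trigger a new one. No-op when
 * preemption is not enabled.
 */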
void a6xx_preemption_schedule(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!adreno_is_preemption_execution_enabled(adreno_dev))
		return;

	mutex_lock(&device->mutex);

	if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE))
		_a6xx_preemption_done(adreno_dev);

	a6xx_preemption_trigger(adreno_dev);

	mutex_unlock(&device->mutex);
}

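/*
 * a6xx_set_marker() - Emit a CP_SET_MARKER packet marking the start (0xD) or
 * end (0xE) of the IB1 list; returns the number of dwords written.
 */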
unsigned int a6xx_set_marker(unsigned int *cmds, int start)
{
	*cmds++ = cp_type7_packet(CP_SET_MARKER, 1);

	/*
	 * Indicate the beginning and end of the IB1 list with a SET_MARKER.
	 * Among other things, this will implicitly enable and disable
	 * preemption respectively.
	 */
	if (start)
		*cmds++ = 0xD;
	else
		*cmds++ = 0xE;

	return 2;
}

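/*
 * a6xx_preemption_pre_ibsubmit() - Emit the CP_SET_PSEUDO_REGISTER packet
 * that tells the CP where to save state on preemption: the privileged
 * per-ringbuffer save area, the per-context non-privileged save area when a
 * context is given, and the perfcounter save/restore buffer.
 */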
unsigned int a6xx_preemption_pre_ibsubmit(
		struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb,
		unsigned int *cmds, struct kgsl_context *context)
{
	unsigned int *cmds_orig = cmds;

	if (context)
		*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 15);
	else
		*cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 12);

	/* NULL SMMU_INFO buffer - we track in KMD */
	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO;
	cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);

	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_NON_SECURE_SAVE_ADDR;
	cmds += cp_gpuaddr(adreno_dev, cmds, rb->preemption_desc.gpuaddr);

	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR;
	cmds += cp_gpuaddr(adreno_dev, cmds, 0);

	if (context) {
		uint64_t gpuaddr = context->user_ctxt_record->memdesc.gpuaddr;

		*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_NON_PRIV_SAVE_ADDR;
		cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr);
	}

	/*
	 * There is no need to specify this address again when we trigger
	 * preemption: the CP copies the address given here, in the
	 * CP_SET_PSEUDO_REGISTER payload, into the context record and thus
	 * knows from where to restore the saved perfcounters for the new
	 * ringbuffer.
	 */
	*cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_COUNTER;
	cmds += cp_gpuaddr(adreno_dev, cmds,
			rb->perfcounter_save_restore_desc.gpuaddr);

	return (unsigned int) (cmds - cmds_orig);
}

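/*
 * a6xx_preemption_post_ibsubmit() - Append a CP_CONTEXT_SWITCH_YIELD packet
 * to the end of the submission so the CP has a point at which it may yield
 * to a pending preemption request.
 */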
unsigned int a6xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
	unsigned int *cmds)
{
	unsigned int *cmds_orig = cmds;

	*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
	cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
	*cmds++ = 1;
	*cmds++ = 0;

	return (unsigned int) (cmds - cmds_orig);
}

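/*
 * a6xx_preemption_start() - (Re)initialize preemption state after a GPU
 * start: reset the state machine, seed the SMMU info record and point the CP
 * at it, and clear the rptr/wptr in every ringbuffer's preemption record.
 */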
void a6xx_preemption_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
	struct adreno_ringbuffer *rb;
	unsigned int i;

	if (!adreno_is_preemption_execution_enabled(adreno_dev))
		return;

	/* Force the state to be clear */
	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);

	/* smmu_info is allocated and mapped in a6xx_preemption_iommu_init */
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(magic), A6XX_CP_SMMU_INFO_MAGIC_REF);
	kgsl_sharedmem_writeq(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(ttbr0), MMU_DEFAULT_TTBR0(device));

	/* The CP doesn't use the asid record, so poison it */
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(asid), 0xDECAFBAD);
	kgsl_sharedmem_writel(device, &iommu->smmu_info,
		PREEMPT_SMMU_RECORD(context_idr),
		MMU_DEFAULT_CONTEXTIDR(device));

	adreno_writereg64(adreno_dev,
		ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
		ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
		iommu->smmu_info.gpuaddr);

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		/*
		 * preemption_desc is allocated and mapped at init time,
		 * so no need to check sharedmem_writel return value
		 */
		kgsl_sharedmem_writel(device, &rb->preemption_desc,
			PREEMPT_RECORD(rptr), 0);
		kgsl_sharedmem_writel(device, &rb->preemption_desc,
			PREEMPT_RECORD(wptr), 0);

		adreno_ringbuffer_set_pagetable(rb,
			device->mmu.defaultpagetable);
	}
}

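/*
 * a6xx_preemption_ringbuffer_init() - Allocate the per-ringbuffer context
 * record and perfcounter save/restore buffer and fill in the static fields
 * of the preemption record (magic, rptr address, ring base, counter address).
 */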
static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
	struct adreno_ringbuffer *rb, uint64_t counteraddr)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	ret = kgsl_allocate_global(device, &rb->preemption_desc,
		A6XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED,
		"preemption_desc");
	if (ret)
		return ret;

	ret = kgsl_allocate_global(device, &rb->perfcounter_save_restore_desc,
		A6XX_CP_PERFCOUNTER_SAVE_RESTORE_SIZE, 0,
		KGSL_MEMDESC_PRIVILEGED, "perfcounter_save_restore_desc");
	if (ret)
		return ret;

	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(magic), A6XX_CP_CTXRECORD_MAGIC_REF);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(info), 0);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(data), 0);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(cntl), A6XX_CP_RB_CNTL_DEFAULT);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(rptr), 0);
	kgsl_sharedmem_writel(device, &rb->preemption_desc,
		PREEMPT_RECORD(wptr), 0);
	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
		PREEMPT_RECORD(rptr_addr), SCRATCH_RPTR_GPU_ADDR(device,
		rb->id));
	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
		PREEMPT_RECORD(rbase), rb->buffer_desc.gpuaddr);
	kgsl_sharedmem_writeq(device, &rb->preemption_desc,
		PREEMPT_RECORD(counter), counteraddr);

	return 0;
}

#ifdef CONFIG_QCOM_KGSL_IOMMU
static int a6xx_preemption_iommu_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);

	/* Allocate mem for storing preemption smmu record */
	return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE,
		KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED,
		"smmu_info");
}

static void a6xx_preemption_iommu_close(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);

	kgsl_free_global(device, &iommu->smmu_info);
}
#else
static int a6xx_preemption_iommu_init(struct adreno_device *adreno_dev)
{
	return -ENODEV;
}

static void a6xx_preemption_iommu_close(struct adreno_device *adreno_dev)
{
}
#endif

static void a6xx_preemption_close(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_preemption *preempt = &adreno_dev->preempt;
	struct adreno_ringbuffer *rb;
	unsigned int i;

	del_timer(&preempt->timer);
	kgsl_free_global(device, &preempt->counters);
	a6xx_preemption_iommu_close(adreno_dev);

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		kgsl_free_global(device, &rb->preemption_desc);
		kgsl_free_global(device, &rb->perfcounter_save_restore_desc);
	}
}

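/*
 * a6xx_preemption_init() - One-time preemption setup: requires an IOMMU,
 * allocates the preemption counter buffer plus the per-ringbuffer records
 * and the SMMU info page, and tears everything down again on any failure.
 */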
int a6xx_preemption_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_preemption *preempt = &adreno_dev->preempt;
	struct adreno_ringbuffer *rb;
	int ret;
	unsigned int i;
	uint64_t addr;

	/* We are dependent on IOMMU to make preemption go on the CP side */
	if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
		return -ENODEV;

	INIT_WORK(&preempt->work, _a6xx_preemption_worker);

	setup_timer(&preempt->timer, _a6xx_preemption_timer,
		(unsigned long) adreno_dev);

	/* Allocate mem for storing preemption counters */
	ret = kgsl_allocate_global(device, &preempt->counters,
		adreno_dev->num_ringbuffers *
		A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0,
		"preemption_counters");
	if (ret)
		goto err;

	addr = preempt->counters.gpuaddr;

	/* Allocate mem for storing preemption switch record */
	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		ret = a6xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
		if (ret)
			goto err;

		addr += A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
	}

	ret = a6xx_preemption_iommu_init(adreno_dev);

err:
	if (ret)
		a6xx_preemption_close(device);

	return ret;
}

void a6xx_preemption_context_destroy(struct kgsl_context *context)
{
	struct kgsl_device *device = context->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!adreno_is_preemption_setup_enabled(adreno_dev))
		return;

	gpumem_free_entry(context->user_ctxt_record);
}

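/*
 * a6xx_preemption_context_init() - Allocate the per-context user (non-priv)
 * restore buffer that the CP saves and restores across preemption; it is
 * freed again in a6xx_preemption_context_destroy().
 */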
int a6xx_preemption_context_init(struct kgsl_context *context)
{
	struct kgsl_device *device = context->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!adreno_is_preemption_setup_enabled(adreno_dev))
		return 0;

	context->user_ctxt_record = gpumem_alloc_entry(context->dev_priv,
		A6XX_CP_CTXRECORD_USER_RESTORE_SIZE, 0);
	if (IS_ERR(context->user_ctxt_record)) {
		int ret = PTR_ERR(context->user_ctxt_record);

		context->user_ctxt_record = NULL;
		return ret;
	}

	return 0;
}