1/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/slab.h>
14#include <linux/sched.h>
15#include <linux/log2.h>
16#include <linux/time.h>
17#include <linux/delay.h>
18
19#include "kgsl.h"
20#include "kgsl_sharedmem.h"
21#include "kgsl_trace.h"
22#include "kgsl_pwrctrl.h"
23
24#include "adreno.h"
25#include "adreno_iommu.h"
26#include "adreno_pm4types.h"
27#include "adreno_ringbuffer.h"
28
29#include "a3xx_reg.h"
30#include "adreno_a5xx.h"
31
32#define RB_HOSTPTR(_rb, _pos) \
33 ((unsigned int *) ((_rb)->buffer_desc.hostptr + \
34 ((_pos) * sizeof(unsigned int))))
35
36#define RB_GPUADDR(_rb, _pos) \
37 ((_rb)->buffer_desc.gpuaddr + ((_pos) * sizeof(unsigned int)))
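/*
 * Both macros take a dword index into the ringbuffer: for example,
 * position 4 is byte offset 16 (4 * sizeof(unsigned int)) into
 * buffer_desc, returned either as a CPU-visible pointer or as a
 * GPU address.
 */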
38
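/*
 * adreno_get_submit_time() - snapshot the GPU and CPU clocks back to back
 * @adreno_dev: Pointer to adreno device
 * @time: Holds the alwayson ticks, local_clock() ns and wall clock time
 *
 * A profiler can use the (ticks, ktime) pair to map later GPU timestamps
 * into CPU time, roughly cpu_ns ~= ktime + (gpu_ticks - ticks) * ns_per_tick,
 * where ns_per_tick depends on the target's alwayson counter frequency
 * (illustrative formula only; the conversion factor is defined elsewhere).
 */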
39static void adreno_get_submit_time(struct adreno_device *adreno_dev,
40 struct adreno_submit_time *time)
41{
42 unsigned long flags;
43 /*
44 * Here we are attempting to create a mapping between the
45 * GPU time domain (alwayson counter) and the CPU time domain
46 * (local_clock) by sampling both values as close together as
47 * possible. This is useful for many types of debugging and
48 * profiling. In order to make this mapping as accurate as
49 * possible, we must turn off interrupts to avoid running
50 * interrupt handlers between the two samples.
51 */
52
53 local_irq_save(flags);
54
55 /* Read always on registers */
56 if (!adreno_is_a3xx(adreno_dev)) {
57 adreno_readreg64(adreno_dev,
58 ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
59 ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
60 &time->ticks);
61
62 /* Mask hi bits as they may be incorrect on some targets */
63 if (ADRENO_GPUREV(adreno_dev) >= 400 &&
64 ADRENO_GPUREV(adreno_dev) <= ADRENO_REV_A530)
65 time->ticks &= 0xFFFFFFFF;
66 } else
67 time->ticks = 0;
68
69 /* Get the kernel clock for time since boot */
70 time->ktime = local_clock();
71
72 /* Get the timeofday for the wall time (for the user) */
73 getnstimeofday(&time->utime);
74
75 local_irq_restore(flags);
76}
77
78/*
79 * Wait time before trying to write the register again.
80 * Hopefully the GMU has finished waking up during this delay.
81 */
82#define GMU_WAKEUP_DELAY 50
 83/* Max number of tries to wake up the GMU. */
84#define GMU_WAKEUP_RETRY_MAX 20
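/*
 * GMU_WAKEUP_DELAY is passed to udelay(), so the worst case spent in
 * _gmu_wptr_update_if_dropped() below is roughly 20 * 50us = 1ms of busy
 * waiting (plus the register accesses) before giving up.
 */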
85
86/*
 87 * Check the WRITEDROPPED0 bit in the
 88 * FENCE_STATUS register to see if the write went
 89 * through. If it didn't, then we retry the write.
90 */
91static inline void _gmu_wptr_update_if_dropped(struct adreno_device *adreno_dev,
92 struct adreno_ringbuffer *rb)
93{
94 unsigned int val, i;
95
96 for (i = 0; i < GMU_WAKEUP_RETRY_MAX; i++) {
97 adreno_read_gmureg(adreno_dev, ADRENO_REG_GMU_AHB_FENCE_STATUS,
98 &val);
99
100 /* If !writedropped, then wptr update was successful */
101 if (!(val & 0x1))
102 return;
103
104 /* Wait a small amount of time before trying again */
105 udelay(GMU_WAKEUP_DELAY);
106
107 /* Try to write WPTR again */
108 adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->_wptr);
109 }
110
111 dev_err(adreno_dev->dev.dev, "GMU WPTR update timed out\n");
112}
113
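/*
 * adreno_ringbuffer_wptr() - publish the software write pointer to hardware
 *
 * rb->_wptr is advanced at allocation time; the CP_RB_WPTR register write
 * happens here, under the preempt lock, and only while no preemption is in
 * flight and this ringbuffer is current. rb->wptr always tracks the latest
 * value so that a skipped register write can be caught up later (outside
 * this function).
 */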
114static void adreno_ringbuffer_wptr(struct adreno_device *adreno_dev,
115 struct adreno_ringbuffer *rb)
116{
117 unsigned long flags;
118
119 spin_lock_irqsave(&rb->preempt_lock, flags);
120 if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
121
122 if (adreno_dev->cur_rb == rb) {
123 /*
124 * Let the pwrscale policy know that new commands have
125 * been submitted.
126 */
127 kgsl_pwrscale_busy(KGSL_DEVICE(adreno_dev));
128 adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
129 rb->_wptr);
130
131 /*
 132 * If a GMU is present, ensure the write posted after a possible
 133 * GMU wakeup (the write could have been dropped during wakeup)
134 */
135 if (kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev)))
136 _gmu_wptr_update_if_dropped(adreno_dev, rb);
137
138 }
139 }
140
141 rb->wptr = rb->_wptr;
142 spin_unlock_irqrestore(&rb->preempt_lock, flags);
143}
144
145void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
146 struct adreno_submit_time *time)
147{
148 struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
149
150 if (time != NULL)
151 adreno_get_submit_time(adreno_dev, time);
152
153 adreno_ringbuffer_wptr(adreno_dev, rb);
154}
155
156int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb,
157 struct adreno_submit_time *time, unsigned int timeout)
158{
159 struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
160
161 adreno_ringbuffer_submit(rb, time);
162 return adreno_spin_idle(adreno_dev, timeout);
163}
164
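/*
 * adreno_ringbuffer_allocspace() - reserve space for commands in the RB
 *
 * Worked example: with KGSL_RB_DWORDS == N, rptr == 100 and _wptr == N - 10,
 * a request for 20 dwords does not fit at the tail, so the tail is padded
 * with a CP_NOP of payload N - _wptr - 1 dwords and the allocation wraps:
 * the caller gets RB_HOSTPTR(rb, 0) and _wptr becomes 20. If neither the
 * tail nor the head has room, ERR_PTR(-ENOSPC) is returned and the caller
 * must retry after the CP consumes more of the ring.
 */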
165unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
166 unsigned int dwords)
167{
168 struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
169 unsigned int rptr = adreno_get_rptr(rb);
170 unsigned int ret;
171
172 if (rptr <= rb->_wptr) {
173 unsigned int *cmds;
174
175 if (rb->_wptr + dwords <= (KGSL_RB_DWORDS - 2)) {
176 ret = rb->_wptr;
177 rb->_wptr = (rb->_wptr + dwords) % KGSL_RB_DWORDS;
178 return RB_HOSTPTR(rb, ret);
179 }
180
181 /*
 182 * There isn't enough space toward the end of the ringbuffer, so
 183 * look for space from the beginning of the ringbuffer up to the
184 * read pointer.
185 */
186 if (dwords < rptr) {
187 cmds = RB_HOSTPTR(rb, rb->_wptr);
188 *cmds = cp_packet(adreno_dev, CP_NOP,
189 KGSL_RB_DWORDS - rb->_wptr - 1);
190 rb->_wptr = dwords;
191 return RB_HOSTPTR(rb, 0);
192 }
193 }
194
195 if (rb->_wptr + dwords < rptr) {
196 ret = rb->_wptr;
197 rb->_wptr = (rb->_wptr + dwords) % KGSL_RB_DWORDS;
198 return RB_HOSTPTR(rb, ret);
199 }
200
201 return ERR_PTR(-ENOSPC);
202}
203
204/**
205 * adreno_ringbuffer_start() - Ringbuffer start
206 * @adreno_dev: Pointer to adreno device
207 * @start_type: Warm or cold start
208 */
209int adreno_ringbuffer_start(struct adreno_device *adreno_dev,
210 unsigned int start_type)
211{
212 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
213 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
214 struct adreno_ringbuffer *rb;
215 int i;
216
217 /* Setup the ringbuffers state before we start */
218 FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
219 kgsl_sharedmem_set(device, &(rb->buffer_desc),
220 0, 0xAA, KGSL_RB_SIZE);
221 kgsl_sharedmem_writel(device, &device->scratch,
222 SCRATCH_RPTR_OFFSET(rb->id), 0);
223 rb->wptr = 0;
224 rb->_wptr = 0;
225 rb->wptr_preempt_end = 0xFFFFFFFF;
226 rb->starve_timer_state =
227 ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
228 }
229
 230 /* The start sequence itself is GPU-core specific */
231 return gpudev->rb_start(adreno_dev, start_type);
232}
233
234void adreno_ringbuffer_stop(struct adreno_device *adreno_dev)
235{
236 struct adreno_ringbuffer *rb;
237 int i;
238
239 FOR_EACH_RINGBUFFER(adreno_dev, rb, i)
240 kgsl_cancel_events(KGSL_DEVICE(adreno_dev), &(rb->events));
241}
242
243static int _rb_readtimestamp(struct kgsl_device *device,
244 void *priv, enum kgsl_timestamp_type type,
245 unsigned int *timestamp)
246{
247 return adreno_rb_readtimestamp(ADRENO_DEVICE(device), priv, type,
248 timestamp);
249}
250
251static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev,
252 int id)
253{
254 struct adreno_ringbuffer *rb = &adreno_dev->ringbuffers[id];
255 int ret;
256 char name[64];
257
258 rb->id = id;
259
260 snprintf(name, sizeof(name), "rb_events-%d", id);
261 kgsl_add_event_group(&rb->events, NULL, name,
262 _rb_readtimestamp, rb);
263 rb->timestamp = 0;
264 init_waitqueue_head(&rb->ts_expire_waitq);
265
266 spin_lock_init(&rb->preempt_lock);
267
268 /*
 269 * Allocate memory for storing RB pagetables and the commands to
 270 * switch pagetables
271 */
272 ret = kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->pagetable_desc,
273 PAGE_SIZE, 0, KGSL_MEMDESC_PRIVILEGED, "pagetable_desc");
274 if (ret)
275 return ret;
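	/*
	 * The ringbuffer itself is mapped GPU read-only: the kernel writes
	 * packets through buffer_desc.hostptr and the CP only fetches them.
	 */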
276 return kgsl_allocate_global(KGSL_DEVICE(adreno_dev), &rb->buffer_desc,
277 KGSL_RB_SIZE, KGSL_MEMFLAGS_GPUREADONLY,
278 0, "ringbuffer");
279}
280
281int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt)
282{
283 int status = 0;
284 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
285 int i;
286
287 if (nopreempt == false && ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
288 adreno_dev->num_ringbuffers = gpudev->num_prio_levels;
289 else
290 adreno_dev->num_ringbuffers = 1;
291
292 for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
293 status = _adreno_ringbuffer_probe(adreno_dev, i);
294 if (status != 0)
295 break;
296 }
297
298 if (status)
299 adreno_ringbuffer_close(adreno_dev);
300 else
301 adreno_dev->cur_rb = &(adreno_dev->ringbuffers[0]);
302
303 return status;
304}
305
306static void _adreno_ringbuffer_close(struct adreno_device *adreno_dev,
307 struct adreno_ringbuffer *rb)
308{
309 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
310
311 kgsl_free_global(device, &rb->pagetable_desc);
312 kgsl_free_global(device, &rb->preemption_desc);
313
314 kgsl_free_global(device, &rb->buffer_desc);
315 kgsl_del_event_group(&rb->events);
316 memset(rb, 0, sizeof(struct adreno_ringbuffer));
317}
318
319void adreno_ringbuffer_close(struct adreno_device *adreno_dev)
320{
321 struct adreno_ringbuffer *rb;
322 int i;
323
324 FOR_EACH_RINGBUFFER(adreno_dev, rb, i)
325 _adreno_ringbuffer_close(adreno_dev, rb);
326}
327
328/*
329 * cp_secure_mode() - Put GPU in trusted mode
330 * @adreno_dev: Pointer to adreno device
331 * @cmds: Pointer to cmds to be put in the ringbuffer
332 * @set: 1 - secure mode, 0 - unsecure mode
333 *
334 * Add commands to the ringbuffer to put the GPU in secure mode
335 * or unsecure mode based on the variable set.
336 */
337int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds,
338 int set)
339{
340 uint *start = cmds;
341
342 if (adreno_is_a4xx(adreno_dev)) {
343 cmds += cp_wait_for_idle(adreno_dev, cmds);
344 /*
345 * The two commands will stall the PFP until the PFP-ME-AHB
346 * is drained and the GPU is idle. As soon as this happens,
347 * the PFP will start moving again.
348 */
349 cmds += cp_wait_for_me(adreno_dev, cmds);
350
351 /*
 352 * The commands below are processed by the ME. The GPU will be
 353 * idle when they are processed, but the PFP will continue
 354 * to fetch instructions at the same time.
355 */
356 *cmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
357 *cmds++ = 0;
358 *cmds++ = cp_packet(adreno_dev, CP_WIDE_REG_WRITE, 2);
359 *cmds++ = adreno_getreg(adreno_dev,
360 ADRENO_REG_RBBM_SECVID_TRUST_CONTROL);
361 *cmds++ = set;
362 *cmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
363 *cmds++ = 1;
364
365 /* Stall PFP until all above commands are complete */
366 cmds += cp_wait_for_me(adreno_dev, cmds);
367 } else {
368 /*
369 * A5xx has a separate opcode specifically to put the GPU
370 * in and out of secure mode.
371 */
372 *cmds++ = cp_packet(adreno_dev, CP_SET_SECURE_MODE, 1);
373 *cmds++ = set;
374 }
375
376 return cmds - start;
377}
378
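/*
 * cp_mem_write() - emit a CP_MEM_WRITE of a single dword to a GPU address
 *
 * Like the other packet helpers it returns the number of dwords emitted,
 * so callers advance their write pointer with, for example:
 *	ringcmds += cp_mem_write(adreno_dev, ringcmds, gpuaddr, value);
 */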
379static inline int cp_mem_write(struct adreno_device *adreno_dev,
380 unsigned int *cmds, uint64_t gpuaddr, unsigned int value)
381{
382 int dwords = 0;
383
384 cmds[dwords++] = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
385 dwords += cp_gpuaddr(adreno_dev, &cmds[dwords], gpuaddr);
386 cmds[dwords++] = value;
387
388 return dwords;
389}
390
391static int
392adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
393 unsigned int flags, unsigned int *cmds,
394 unsigned int sizedwords, uint32_t timestamp,
395 struct adreno_submit_time *time)
396{
397 struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
398 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
399 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
400 unsigned int *ringcmds, *start;
401 unsigned int total_sizedwords = sizedwords;
402 unsigned int i;
403 unsigned int context_id = 0;
404 bool profile_ready;
405 struct adreno_context *drawctxt = rb->drawctxt_active;
406 struct kgsl_context *context = NULL;
407 bool secured_ctxt = false;
408 static unsigned int _seq_cnt;
409
410 if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base) &&
411 !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
412 return -ENOENT;
413
414 /* On fault return error so that we don't keep submitting */
415 if (adreno_gpu_fault(adreno_dev) != 0)
416 return -EPROTO;
417
418 rb->timestamp++;
419
 420 /* If this is an internal IB, use the global timestamp for it */
421 if (!drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
422 timestamp = rb->timestamp;
423 else {
424 context_id = drawctxt->base.id;
425 context = &drawctxt->base;
426 }
427
428 /*
429 * Note that we cannot safely take drawctxt->mutex here without
430 * potential mutex inversion with device->mutex which is held
431 * here. As a result, any other code that accesses this variable
432 * must also use device->mutex.
433 */
434 if (drawctxt) {
435 drawctxt->internal_timestamp = rb->timestamp;
436 if (drawctxt->base.flags & KGSL_CONTEXT_SECURE)
437 secured_ctxt = true;
438 }
439
440 /*
 441 * If in-stream IB profiling is enabled and there are counters
 442 * assigned, then space needs to be reserved for profiling. This
 443 * space in the ringbuffer is always consumed (it might be filled with
 444 * NOPs in the error case). profile_ready needs to be consistent through
445 * the _addcmds call since it is allocating additional ringbuffer
446 * command space.
447 */
448 profile_ready = drawctxt &&
449 adreno_profile_assignments_ready(&adreno_dev->profile) &&
450 !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE);
451
452 /*
453 * reserve space to temporarily turn off protected mode
454 * error checking if needed
455 */
456 total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
457 /* 2 dwords to store the start of command sequence */
458 total_sizedwords += 2;
459 /* internal ib command identifier for the ringbuffer */
460 total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
461
462 total_sizedwords += (secured_ctxt) ? 26 : 0;
463
464 /* _seq mem write for each submission */
465 total_sizedwords += 4;
466
467 /* context rollover */
468 if (adreno_is_a3xx(adreno_dev))
469 total_sizedwords += 3;
470
471 /* For HLSQ updates below */
472 if (adreno_is_a4xx(adreno_dev) || adreno_is_a3xx(adreno_dev))
473 total_sizedwords += 4;
474
475 if (gpudev->preemption_pre_ibsubmit &&
476 adreno_is_preemption_enabled(adreno_dev))
477 total_sizedwords += 22;
478
479 if (gpudev->preemption_post_ibsubmit &&
480 adreno_is_preemption_enabled(adreno_dev))
481 total_sizedwords += 5;
482
483 /*
 484 * A5xx uses 64-bit memory addresses. PM4 commands that involve a read or
 485 * write from memory take 4 bytes more than on A4xx because of 64-bit addressing.
486 * This function is shared between gpucores, so reserve the max size
487 * required in ringbuffer and adjust the write pointer depending on
488 * gpucore at the end of this function.
489 */
490 total_sizedwords += 8; /* sop timestamp */
491 total_sizedwords += 5; /* eop timestamp */
492
493 if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
494 /* global timestamp without cache flush for non-zero context */
495 total_sizedwords += 4;
496 }
497
498 if (flags & KGSL_CMD_FLAGS_WFI)
499 total_sizedwords += 2; /* WFI */
500
501 if (profile_ready)
502 total_sizedwords += 8; /* space for pre_ib and post_ib */
503
504 /* Add space for the power on shader fixup if we need it */
505 if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP)
506 total_sizedwords += 9;
507
508 /*
509 * WAIT_MEM_WRITES - needed in the stall on fault case
510 * to prevent out of order CP operations that can result
511 * in a CACHE_FLUSH_TS interrupt storm
512 */
513 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
514 &adreno_dev->ft_pf_policy))
515 total_sizedwords += 1;
516
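	/*
	 * total_sizedwords is now the worst case for this submission. For
	 * example, an internal issue on an A5xx with preemption disabled and
	 * none of the optional features above reserves
	 * sizedwords + 2 + 2 + 4 + 8 + 5 = sizedwords + 21 dwords; any unused
	 * portion is handed back by adjusting _wptr at the end.
	 */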
517 ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
518 if (IS_ERR(ringcmds))
519 return PTR_ERR(ringcmds);
520
521 start = ringcmds;
522
523 *ringcmds++ = cp_packet(adreno_dev, CP_NOP, 1);
524 *ringcmds++ = KGSL_CMD_IDENTIFIER;
525
526 if (adreno_is_preemption_enabled(adreno_dev) &&
527 gpudev->preemption_pre_ibsubmit)
528 ringcmds += gpudev->preemption_pre_ibsubmit(
529 adreno_dev, rb, ringcmds, context);
530
531 if (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) {
532 *ringcmds++ = cp_packet(adreno_dev, CP_NOP, 1);
533 *ringcmds++ = KGSL_CMD_INTERNAL_IDENTIFIER;
534 }
535
536 if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) {
537 /* Disable protected mode for the fixup */
538 *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
539 *ringcmds++ = 0;
540
541 *ringcmds++ = cp_packet(adreno_dev, CP_NOP, 1);
542 *ringcmds++ = KGSL_PWRON_FIXUP_IDENTIFIER;
543 *ringcmds++ = cp_mem_packet(adreno_dev,
544 CP_INDIRECT_BUFFER_PFE, 2, 1);
545 ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
546 adreno_dev->pwron_fixup.gpuaddr);
547 *ringcmds++ = adreno_dev->pwron_fixup_dwords;
548
549 /* Re-enable protected mode */
550 *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
551 *ringcmds++ = 1;
552 }
553
554 /* Add any IB required for profiling if it is enabled */
555 if (profile_ready)
556 adreno_profile_preib_processing(adreno_dev, drawctxt,
557 &flags, &ringcmds);
558
559 /* start-of-pipeline timestamp for the context */
560 if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
561 ringcmds += cp_mem_write(adreno_dev, ringcmds,
562 MEMSTORE_ID_GPU_ADDR(device, context_id, soptimestamp),
563 timestamp);
564
565 /* start-of-pipeline timestamp for the ringbuffer */
566 ringcmds += cp_mem_write(adreno_dev, ringcmds,
567 MEMSTORE_RB_GPU_ADDR(device, rb, soptimestamp), rb->timestamp);
568
569 if (secured_ctxt)
570 ringcmds += cp_secure_mode(adreno_dev, ringcmds, 1);
571
572 if (flags & KGSL_CMD_FLAGS_PMODE) {
573 /* disable protected mode error checking */
574 *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
575 *ringcmds++ = 0;
576 }
577
578 for (i = 0; i < sizedwords; i++)
579 *ringcmds++ = cmds[i];
580
581 if (flags & KGSL_CMD_FLAGS_PMODE) {
582 /* re-enable protected mode error checking */
583 *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
584 *ringcmds++ = 1;
585 }
586
587 /*
588 * Flush HLSQ lazy updates to make sure there are no
589 * resources pending for indirect loads after the timestamp
590 */
591 if (adreno_is_a4xx(adreno_dev) || adreno_is_a3xx(adreno_dev)) {
592 *ringcmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
593 *ringcmds++ = 0x07; /* HLSQ_FLUSH */
594 ringcmds += cp_wait_for_idle(adreno_dev, ringcmds);
595 }
596
597 /*
598 * Add any postIB required for profiling if it is enabled and has
599 * assigned counters
600 */
601 if (profile_ready)
602 adreno_profile_postib_processing(adreno_dev, &flags, &ringcmds);
603
604 /*
605 * WAIT_MEM_WRITES - needed in the stall on fault case to prevent
606 * out of order CP operations that can result in a CACHE_FLUSH_TS
607 * interrupt storm
608 */
609 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
610 &adreno_dev->ft_pf_policy))
611 *ringcmds++ = cp_packet(adreno_dev, CP_WAIT_MEM_WRITES, 0);
612
613 /*
614 * Do a unique memory write from the GPU. This can be used in
615 * early detection of timestamp interrupt storms to stave
616 * off system collapse.
617 */
618 ringcmds += cp_mem_write(adreno_dev, ringcmds,
619 MEMSTORE_ID_GPU_ADDR(device, KGSL_MEMSTORE_GLOBAL,
620 ref_wait_ts), ++_seq_cnt);
621
622 /*
 623 * end-of-pipeline timestamp. If per-context timestamps are not
 624 * enabled, then drawctxt will be NULL or the internal command flag will
 625 * be set, and hence the rb timestamp will be used in the else below.
626 */
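	/*
	 * Bit 31 of the event ordinal additionally asks the CP to raise the
	 * CACHE_FLUSH_TS interrupt when the event retires (the interrupt the
	 * storm-avoidance comments above refer to); without it the timestamp
	 * is still written to memory but no interrupt fires.
	 */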
627 *ringcmds++ = cp_mem_packet(adreno_dev, CP_EVENT_WRITE, 3, 1);
628 if (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
629 *ringcmds++ = CACHE_FLUSH_TS | (1 << 31);
630 else
631 *ringcmds++ = CACHE_FLUSH_TS;
632
633 if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
634 ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
635 MEMSTORE_ID_GPU_ADDR(device, context_id, eoptimestamp));
636 *ringcmds++ = timestamp;
637
638 /* Write the end of pipeline timestamp to the ringbuffer too */
639 ringcmds += cp_mem_write(adreno_dev, ringcmds,
640 MEMSTORE_RB_GPU_ADDR(device, rb, eoptimestamp),
641 rb->timestamp);
642 } else {
643 ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
644 MEMSTORE_RB_GPU_ADDR(device, rb, eoptimestamp));
645 *ringcmds++ = timestamp;
646 }
647
648 if (adreno_is_a3xx(adreno_dev)) {
649 /* Dummy set-constant to trigger context rollover */
650 *ringcmds++ = cp_packet(adreno_dev, CP_SET_CONSTANT, 2);
651 *ringcmds++ =
652 (0x4<<16) | (A3XX_HLSQ_CL_KERNEL_GROUP_X_REG - 0x2000);
653 *ringcmds++ = 0;
654 }
655
656 if (flags & KGSL_CMD_FLAGS_WFI)
657 ringcmds += cp_wait_for_idle(adreno_dev, ringcmds);
658
659 if (secured_ctxt)
660 ringcmds += cp_secure_mode(adreno_dev, ringcmds, 0);
661
662 if (gpudev->preemption_post_ibsubmit &&
663 adreno_is_preemption_enabled(adreno_dev))
664 ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev,
665 ringcmds);
666
667 /*
 668 * If we wrote more ringbuffer commands than the space we reserved,
 669 * BUG() so this gets fixed, because it will lead to
 670 * weird errors.
671 */
672 if ((ringcmds - start) > total_sizedwords)
673 BUG();
674 /*
 675 * We allocated total_sizedwords of space in the RB, which is the max
 676 * space required. If we wrote fewer commands than the space reserved,
 677 * adjust the wptr accordingly.
678 */
679 rb->_wptr = rb->_wptr - (total_sizedwords - (ringcmds - start));
680
681 adreno_ringbuffer_submit(rb, time);
682
683 return 0;
684}
685
686int
687adreno_ringbuffer_issuecmds(struct adreno_ringbuffer *rb,
688 unsigned int flags,
689 unsigned int *cmds,
690 int sizedwords)
691{
692 flags |= KGSL_CMD_FLAGS_INTERNAL_ISSUE;
693
694 return adreno_ringbuffer_addcmds(rb, flags, cmds,
695 sizedwords, 0, NULL);
696}
697
698static void adreno_ringbuffer_set_constraint(struct kgsl_device *device,
699 struct kgsl_drawobj *drawobj)
700{
701 struct kgsl_context *context = drawobj->context;
702 /*
703 * Check if the context has a constraint and constraint flags are
704 * set.
705 */
706 if (context->pwr_constraint.type &&
707 ((context->flags & KGSL_CONTEXT_PWR_CONSTRAINT) ||
708 (drawobj->flags & KGSL_CONTEXT_PWR_CONSTRAINT)))
709 kgsl_pwrctrl_set_constraint(device, &context->pwr_constraint,
710 context->id);
711}
712
713static inline int _get_alwayson_counter(struct adreno_device *adreno_dev,
714 unsigned int *cmds, uint64_t gpuaddr)
715{
716 unsigned int *p = cmds;
717
718 *p++ = cp_mem_packet(adreno_dev, CP_REG_TO_MEM, 2, 1);
719
720 /*
 721 * For A4xx and some A5xx targets the alwayson_hi read through the CPU
 722 * will be masked. Only do 32-bit CP reads to keep the
 723 * numbers consistent.
724 */
725 if (ADRENO_GPUREV(adreno_dev) >= 400 &&
726 ADRENO_GPUREV(adreno_dev) <= ADRENO_REV_A530)
727 *p++ = adreno_getreg(adreno_dev,
728 ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO);
729 else
730 *p++ = adreno_getreg(adreno_dev,
731 ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO) |
732 (1 << 30) | (2 << 18);
733 p += cp_gpuaddr(adreno_dev, p, gpuaddr);
734
735 return (unsigned int)(p - cmds);
736}
737
 738/* adreno_ringbuffer_submitcmd - submit userspace IBs to the GPU */
739int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
740 struct kgsl_drawobj_cmd *cmdobj,
741 struct adreno_submit_time *time)
742{
743 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
744 struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
745 struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
746 struct kgsl_memobj_node *ib;
747 unsigned int numibs = 0;
748 unsigned int *link;
749 unsigned int *cmds;
750 struct kgsl_context *context;
751 struct adreno_context *drawctxt;
752 bool use_preamble = true;
753 bool user_profiling = false;
754 bool kernel_profiling = false;
755 int flags = KGSL_CMD_FLAGS_NONE;
756 int ret;
757 struct adreno_ringbuffer *rb;
758 struct kgsl_drawobj_profiling_buffer *profile_buffer = NULL;
759 unsigned int dwords = 0;
760 struct adreno_submit_time local;
761
762 struct kgsl_mem_entry *entry = cmdobj->profiling_buf_entry;
763
764 if (entry)
765 profile_buffer = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
766 cmdobj->profiling_buffer_gpuaddr);
767
768 context = drawobj->context;
769 drawctxt = ADRENO_CONTEXT(context);
770
771 /* Get the total IBs in the list */
772 list_for_each_entry(ib, &cmdobj->cmdlist, node)
773 numibs++;
774
775 rb = drawctxt->rb;
776
777 /* process any profiling results that are available into the log_buf */
778 adreno_profile_process_results(adreno_dev);
779
780 /*
781 * If SKIP CMD flag is set for current context
782 * a) set SKIPCMD as fault_recovery for current commandbatch
783 * b) store context's commandbatch fault_policy in current
784 * commandbatch fault_policy and clear context's commandbatch
785 * fault_policy
786 * c) force preamble for commandbatch
787 */
788 if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) &&
789 (!test_bit(CMDOBJ_SKIP, &cmdobj->priv))) {
790
791 set_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_recovery);
792 cmdobj->fault_policy = drawctxt->fault_policy;
793 set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
794
795 /* if context is detached print fault recovery */
796 adreno_fault_skipcmd_detached(adreno_dev, drawctxt, drawobj);
797
798 /* clear the drawctxt flags */
799 clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv);
800 drawctxt->fault_policy = 0;
801 }
802
803 /*
804 * When preamble is enabled, the preamble buffer with state restoration
 805 * commands is stored in the first node of the IB chain.
806 * We can skip that if a context switch hasn't occurred.
807 */
808
809 if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) &&
810 !test_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv) &&
811 (rb->drawctxt_active == drawctxt))
812 use_preamble = false;
813
814 /*
815 * In skip mode don't issue the draw IBs but keep all the other
 816 * accoutrements of a submission (including the interrupt) to keep
817 * the accounting sane. Set start_index and numibs to 0 to just
818 * generate the start and end markers and skip everything else
819 */
820 if (test_bit(CMDOBJ_SKIP, &cmdobj->priv)) {
821 use_preamble = false;
822 numibs = 0;
823 }
824
825 /*
 826 * A5xx uses 64-bit memory addresses. PM4 commands that involve a read or
 827 * write from memory take 4 bytes more than on A4xx because of 64-bit addressing.
828 * This function is shared between gpucores, so reserve the max size
829 * required and adjust the number of commands before calling addcmds.
830 * Each submission needs 7 dwords max for wrappers and other red tape.
831 */
832 dwords = 7;
833
834 /* Each IB takes up 30 dwords in worst case */
835 dwords += (numibs * 30);
836
837 if (drawobj->flags & KGSL_DRAWOBJ_PROFILING &&
838 !adreno_is_a3xx(adreno_dev) && profile_buffer) {
839 user_profiling = true;
840 dwords += 6;
841
842 /*
843 * REG_TO_MEM packet on A5xx and above needs another ordinal.
844 * Add 2 more dwords since we do profiling before and after.
845 */
846 if (!ADRENO_LEGACY_PM4(adreno_dev))
847 dwords += 2;
848
849 /*
850 * we want to use an adreno_submit_time struct to get the
851 * precise moment when the command is submitted to the
852 * ringbuffer. If an upstream caller already passed down a
 853 * pointer, piggyback on that; otherwise use a local struct.
854 */
855
856 if (time == NULL)
857 time = &local;
858 }
859
860 if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) {
861 kernel_profiling = true;
862 dwords += 6;
863 if (!ADRENO_LEGACY_PM4(adreno_dev))
864 dwords += 2;
865 }
866
867 if (adreno_is_preemption_enabled(adreno_dev)) {
868 if (gpudev->preemption_set_marker)
869 dwords += 4;
870 else if (gpudev->preemption_yield_enable)
871 dwords += 8;
872 }
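	/*
	 * dwords is now the worst-case size of the wrapper commands built
	 * below. For example, two IBs with both kernel and user profiling on
	 * a non-legacy (64-bit PM4) target come to 7 + 2 * 30 + 8 + 8 = 83
	 * dwords before any preemption markers; addcmds later copies only
	 * the (cmds - link) dwords actually written.
	 */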
873
874 link = kcalloc(dwords, sizeof(unsigned int), GFP_KERNEL);
875 if (!link) {
876 ret = -ENOMEM;
877 goto done;
878 }
879
880 cmds = link;
881
882 *cmds++ = cp_packet(adreno_dev, CP_NOP, 1);
883 *cmds++ = KGSL_START_OF_IB_IDENTIFIER;
884
885 if (kernel_profiling) {
886 cmds += _get_alwayson_counter(adreno_dev, cmds,
887 adreno_dev->profile_buffer.gpuaddr +
888 ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index,
889 started));
890 }
891
892 /*
893 * Add cmds to read the GPU ticks at the start of command obj and
894 * write it into the appropriate command obj profiling buffer offset
895 */
896 if (user_profiling) {
897 cmds += _get_alwayson_counter(adreno_dev, cmds,
898 cmdobj->profiling_buffer_gpuaddr +
899 offsetof(struct kgsl_drawobj_profiling_buffer,
900 gpu_ticks_submitted));
901 }
902
903 if (gpudev->preemption_set_marker &&
904 adreno_is_preemption_enabled(adreno_dev))
905 cmds += gpudev->preemption_set_marker(cmds, 1);
906
907 if (numibs) {
908 list_for_each_entry(ib, &cmdobj->cmdlist, node) {
909 /*
910 * Skip 0 sized IBs - these are presumed to have been
911 * removed from consideration by the FT policy
912 */
913 if (ib->priv & MEMOBJ_SKIP ||
914 (ib->priv & MEMOBJ_PREAMBLE &&
915 use_preamble == false))
916 *cmds++ = cp_mem_packet(adreno_dev, CP_NOP,
917 3, 1);
918
919 *cmds++ = cp_mem_packet(adreno_dev,
920 CP_INDIRECT_BUFFER_PFE, 2, 1);
921 cmds += cp_gpuaddr(adreno_dev, cmds, ib->gpuaddr);
922 *cmds++ = (unsigned int) ib->size >> 2;
 923 /* the preamble is required only for the first command */
924 use_preamble = false;
925 }
926 }
927
928 if (adreno_is_preemption_enabled(adreno_dev)) {
929 if (gpudev->preemption_set_marker)
930 cmds += gpudev->preemption_set_marker(cmds, 0);
931 else if (gpudev->preemption_yield_enable)
932 cmds += gpudev->preemption_yield_enable(cmds);
933 }
934
935 if (kernel_profiling) {
936 cmds += _get_alwayson_counter(adreno_dev, cmds,
937 adreno_dev->profile_buffer.gpuaddr +
938 ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index,
939 retired));
940 }
941
942 /*
943 * Add cmds to read the GPU ticks at the end of command obj and
944 * write it into the appropriate command obj profiling buffer offset
945 */
946 if (user_profiling) {
947 cmds += _get_alwayson_counter(adreno_dev, cmds,
948 cmdobj->profiling_buffer_gpuaddr +
949 offsetof(struct kgsl_drawobj_profiling_buffer,
950 gpu_ticks_retired));
951 }
952
953 *cmds++ = cp_packet(adreno_dev, CP_NOP, 1);
954 *cmds++ = KGSL_END_OF_IB_IDENTIFIER;
955
 956 /* Context switch commands should *always* be executed on the GPU */
957 ret = adreno_drawctxt_switch(adreno_dev, rb, drawctxt,
958 ADRENO_CONTEXT_SWITCH_FORCE_GPU);
959
960 /*
961 * In the unlikely event of an error in the drawctxt switch,
962 * treat it like a hang
963 */
964 if (ret) {
965 /*
966 * It is "normal" to get a -ENOSPC or a -ENOENT. Don't log it,
967 * the upper layers know how to handle it
968 */
969 if (ret != -ENOSPC && ret != -ENOENT)
970 KGSL_DRV_ERR(device,
971 "Unable to switch draw context: %d\n", ret);
972 goto done;
973 }
974
975 if (test_bit(CMDOBJ_WFI, &cmdobj->priv))
976 flags = KGSL_CMD_FLAGS_WFI;
977
978 /*
979 * For some targets, we need to execute a dummy shader operation after a
980 * power collapse
981 */
982
983 if (test_and_clear_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv) &&
984 test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv))
985 flags |= KGSL_CMD_FLAGS_PWRON_FIXUP;
986
987 /* Set the constraints before adding to ringbuffer */
988 adreno_ringbuffer_set_constraint(device, drawobj);
989
990 ret = adreno_ringbuffer_addcmds(rb, flags,
991 &link[0], (cmds - link),
992 drawobj->timestamp, time);
993
994 if (!ret) {
995 set_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv);
996 cmdobj->global_ts = drawctxt->internal_timestamp;
997
998 /* Put the timevalues in the profiling buffer */
999 if (user_profiling) {
1000 /*
 1001 * Return kernel clock time to the client
1002 * if requested
1003 */
1004 if (drawobj->flags & KGSL_DRAWOBJ_PROFILING_KTIME) {
1005 uint64_t secs = time->ktime;
1006
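				/*
				 * do_div() divides in place: secs is reduced
				 * to whole seconds and the nanosecond
				 * remainder is returned, which is what lands
				 * in wall_clock_ns.
				 */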
1007 profile_buffer->wall_clock_ns =
1008 do_div(secs, NSEC_PER_SEC);
1009 profile_buffer->wall_clock_s = secs;
1010 } else {
1011 profile_buffer->wall_clock_s =
1012 time->utime.tv_sec;
1013 profile_buffer->wall_clock_ns =
1014 time->utime.tv_nsec;
1015 }
1016 profile_buffer->gpu_ticks_queued = time->ticks;
1017 }
1018 }
1019
1020done:
1021 /* Corresponding unmap to the memdesc map of profile_buffer */
1022 if (entry)
1023 kgsl_memdesc_unmap(&entry->memdesc);
1024
1025
1026 trace_kgsl_issueibcmds(device, context->id, numibs, drawobj->timestamp,
1027 drawobj->flags, ret, drawctxt->type);
1028
1029 kfree(link);
1030 return ret;
1031}
1032
1033/**
1034 * adreno_ringbuffer_wait_callback() - Callback function for event registered
1035 * on a ringbuffer timestamp
 1036 * @device: Device for which the callback is valid
 1037 * @group: The event group that the event belongs to
1038 * @priv: The private parameter of the event
1039 * @result: Result of the event trigger
1040 */
1041static void adreno_ringbuffer_wait_callback(struct kgsl_device *device,
1042 struct kgsl_event_group *group,
1043 void *priv, int result)
1044{
1045 struct adreno_ringbuffer *rb = group->priv;
1046
1047 wake_up_all(&rb->ts_expire_waitq);
1048}
1049
 1050/* Check whether the current rb timestamp has reached the given timestamp */
1051static inline int adreno_ringbuffer_check_timestamp(
1052 struct adreno_ringbuffer *rb,
1053 unsigned int timestamp, int type)
1054{
1055 struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
1056 unsigned int ts;
1057
1058 adreno_rb_readtimestamp(adreno_dev, rb, type, &ts);
1059 return (timestamp_cmp(ts, timestamp) >= 0);
1060}
1061
1062
1063/**
1064 * adreno_ringbuffer_waittimestamp() - Wait for a RB timestamp
1065 * @rb: The ringbuffer to wait on
1066 * @timestamp: The timestamp to wait for
1067 * @msecs: The wait timeout period
1068 */
1069int adreno_ringbuffer_waittimestamp(struct adreno_ringbuffer *rb,
1070 unsigned int timestamp,
1071 unsigned int msecs)
1072{
1073 struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
1074 struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
1075 int ret;
1076 unsigned long wait_time;
1077
1078 /* check immediately if timeout is 0 */
1079 if (msecs == 0)
1080 return adreno_ringbuffer_check_timestamp(rb,
1081 timestamp, KGSL_TIMESTAMP_RETIRED) ? 0 : -EBUSY;
1082
1083 ret = kgsl_add_event(device, &rb->events, timestamp,
1084 adreno_ringbuffer_wait_callback, NULL);
1085 if (ret)
1086 return ret;
1087
1088 mutex_unlock(&device->mutex);
1089
1090 wait_time = msecs_to_jiffies(msecs);
1091 if (wait_event_timeout(rb->ts_expire_waitq,
1092 !kgsl_event_pending(device, &rb->events, timestamp,
1093 adreno_ringbuffer_wait_callback, NULL),
1094 wait_time) == 0)
1095 ret = -ETIMEDOUT;
1096
1097 mutex_lock(&device->mutex);
1098 /*
 1099 * after waking up, make sure that the expected timestamp has retired,
 1100 * because the wakeup could have happened due to a cancel event
1101 */
1102 if (!ret && !adreno_ringbuffer_check_timestamp(rb,
1103 timestamp, KGSL_TIMESTAMP_RETIRED)) {
1104 ret = -EAGAIN;
1105 }
1106
1107 return ret;
1108}