1/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/types.h>
14#include <linux/delay.h>
15#include <linux/device.h>
16#include <linux/spinlock.h>
17#include <linux/genalloc.h>
18#include <linux/slab.h>
19#include <linux/iommu.h>
20#include <linux/msm_kgsl.h>
21#include <linux/ratelimit.h>
22#include <linux/of_platform.h>
23#include <soc/qcom/scm.h>
24#include <soc/qcom/secure_buffer.h>
25#include <stddef.h>
26#include <linux/compat.h>
27
28#include "kgsl.h"
29#include "kgsl_device.h"
30#include "kgsl_mmu.h"
31#include "kgsl_sharedmem.h"
32#include "kgsl_iommu.h"
33#include "adreno_pm4types.h"
34#include "adreno.h"
35#include "kgsl_trace.h"
36#include "kgsl_pwrctrl.h"
37
38#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
39
40#define ADDR_IN_GLOBAL(_a) \
41 (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \
42 ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE)))
43
44static struct kgsl_mmu_pt_ops iommu_pt_ops;
45static bool need_iommu_sync;
46
47const unsigned int kgsl_iommu_reg_list[KGSL_IOMMU_REG_MAX] = {
48 0x0,/* SCTLR */
49 0x20,/* TTBR0 */
50 0x34,/* CONTEXTIDR */
51 0x58,/* FSR */
52 0x60,/* FAR_0 */
53 0x618,/* TLBIALL */
54 0x008,/* RESUME */
55 0x68,/* FSYNR0 */
56 0x6C,/* FSYNR1 */
57 0x7F0,/* TLBSYNC */
58 0x7F4,/* TLBSTATUS */
59};
60
61/*
62 * struct kgsl_iommu_addr_entry - entry in the kgsl_iommu_pt rbtree.
63 * @base: starting virtual address of the entry
64 * @size: size of the entry
65 * @node: the rbtree node
66 *
67 */
68struct kgsl_iommu_addr_entry {
69 uint64_t base;
70 uint64_t size;
71 struct rb_node node;
72};
73
74static struct kmem_cache *addr_entry_cache;
75
76/*
77 * There are certain memory allocations (ringbuffer, memstore, etc) that need to
78 * be present at the same address in every pagetable. We call these "global"
79 * pagetable entries. There are relatively few of these and they are mostly
80 * stable (defined at init time) but the actual number of globals can differ
 81 * slightly depending on the target and implementation.
82 *
83 * Here we define an array and a simple allocator to keep track of the currently
84 * active global entries. Each entry is assigned a unique address inside of a
85 * MMU implementation specific "global" region. The addresses are assigned
86 * sequentially and never re-used to avoid having to go back and reprogram
 87 * existing pagetables. The entire list of active entries is mapped and
88 * unmapped into every new pagetable as it is created and destroyed.
89 *
90 * Because there are relatively few entries and they are defined at boot time we
91 * don't need to go over the top to define a dynamic allocation scheme. It will
92 * be less wasteful to pick a static number with a little bit of growth
93 * potential.
94 */
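/*
 * Example (see kgsl_iommu_init()):
 *   kgsl_iommu_add_global(mmu, &iommu->setstate, "setstate");
 * assigns the next free slice of the global region to the memdesc and
 * records it so that every pagetable created afterwards maps it at the
 * same GPU address.
 */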
95
96#define GLOBAL_PT_ENTRIES 32
97
98struct global_pt_entry {
99 struct kgsl_memdesc *memdesc;
100 char name[32];
101};
102
103static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
104static struct kgsl_memdesc *kgsl_global_secure_pt_entry;
105static int global_pt_count;
106uint64_t global_pt_alloc;
107static struct kgsl_memdesc gpu_qdss_desc;
108
109void kgsl_print_global_pt_entries(struct seq_file *s)
110{
111 int i;
112
113 for (i = 0; i < global_pt_count; i++) {
114 struct kgsl_memdesc *memdesc = global_pt_entries[i].memdesc;
115
116 if (memdesc == NULL)
117 continue;
118
119 seq_printf(s, "0x%16.16llX-0x%16.16llX %16llu %s\n",
120 memdesc->gpuaddr, memdesc->gpuaddr + memdesc->size - 1,
121 memdesc->size, global_pt_entries[i].name);
122 }
123}
124
125static void kgsl_iommu_unmap_globals(struct kgsl_pagetable *pagetable)
126{
127 unsigned int i;
128
129 for (i = 0; i < global_pt_count; i++) {
130 if (global_pt_entries[i].memdesc != NULL)
131 kgsl_mmu_unmap(pagetable,
132 global_pt_entries[i].memdesc);
133 }
134}
135
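/* Map every currently registered global entry into the given pagetable */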
136static int kgsl_iommu_map_globals(struct kgsl_pagetable *pagetable)
137{
138 unsigned int i;
139
140 for (i = 0; i < global_pt_count; i++) {
141 if (global_pt_entries[i].memdesc != NULL) {
142 int ret = kgsl_mmu_map(pagetable,
143 global_pt_entries[i].memdesc);
144
145 if (ret)
146 return ret;
147 }
148 }
149
150 return 0;
151}
152
153static void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_pagetable
154 *pagetable)
155{
156 struct kgsl_memdesc *entry = kgsl_global_secure_pt_entry;
157
158 if (entry != NULL)
159 kgsl_mmu_unmap(pagetable, entry);
160
161}
162
163static int kgsl_map_global_secure_pt_entry(struct kgsl_pagetable *pagetable)
164{
165 int ret = 0;
166 struct kgsl_memdesc *entry = kgsl_global_secure_pt_entry;
167
168 if (entry != NULL) {
169 entry->pagetable = pagetable;
170 ret = kgsl_mmu_map(pagetable, entry);
171 }
172 return ret;
173}
174
175static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
176 struct kgsl_memdesc *memdesc)
177{
178 int i;
179
180 if (memdesc->gpuaddr == 0 || !(memdesc->priv & KGSL_MEMDESC_GLOBAL))
181 return;
182
183 for (i = 0; i < global_pt_count; i++) {
184 if (global_pt_entries[i].memdesc == memdesc) {
185 memdesc->gpuaddr = 0;
186 memdesc->priv &= ~KGSL_MEMDESC_GLOBAL;
187 global_pt_entries[i].memdesc = NULL;
188 return;
189 }
190 }
191}
192
193static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
194 struct kgsl_memdesc *memdesc, const char *name)
195{
196 if (memdesc->gpuaddr != 0)
197 return;
198
 199 /* Check that we can fit the global allocations */
200 if (WARN_ON(global_pt_count >= GLOBAL_PT_ENTRIES) ||
201 WARN_ON((global_pt_alloc + memdesc->size) >=
202 KGSL_IOMMU_GLOBAL_MEM_SIZE))
203 return;
204
205 memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE + global_pt_alloc;
206 memdesc->priv |= KGSL_MEMDESC_GLOBAL;
207 global_pt_alloc += memdesc->size;
208
209 global_pt_entries[global_pt_count].memdesc = memdesc;
210 strlcpy(global_pt_entries[global_pt_count].name, name,
211 sizeof(global_pt_entries[global_pt_count].name));
212 global_pt_count++;
213}
214
215void kgsl_add_global_secure_entry(struct kgsl_device *device,
216 struct kgsl_memdesc *memdesc)
217{
218 memdesc->gpuaddr = KGSL_IOMMU_SECURE_BASE;
219 kgsl_global_secure_pt_entry = memdesc;
220}
221
222struct kgsl_memdesc *kgsl_iommu_get_qdss_global_entry(void)
223{
224 return &gpu_qdss_desc;
225}
226
227static void kgsl_setup_qdss_desc(struct kgsl_device *device)
228{
229 int result = 0;
230 uint32_t gpu_qdss_entry[2];
231
232 if (!of_find_property(device->pdev->dev.of_node,
233 "qcom,gpu-qdss-stm", NULL))
234 return;
235
236 if (of_property_read_u32_array(device->pdev->dev.of_node,
237 "qcom,gpu-qdss-stm", gpu_qdss_entry, 2)) {
238 KGSL_CORE_ERR("Failed to read gpu qdss dts entry\n");
239 return;
240 }
241
242 gpu_qdss_desc.flags = 0;
243 gpu_qdss_desc.priv = 0;
244 gpu_qdss_desc.physaddr = gpu_qdss_entry[0];
245 gpu_qdss_desc.size = gpu_qdss_entry[1];
246 gpu_qdss_desc.pagetable = NULL;
247 gpu_qdss_desc.ops = NULL;
248 gpu_qdss_desc.dev = device->dev->parent;
249 gpu_qdss_desc.hostptr = NULL;
250
251 result = memdesc_sg_dma(&gpu_qdss_desc, gpu_qdss_desc.physaddr,
252 gpu_qdss_desc.size);
253 if (result) {
254 KGSL_CORE_ERR("memdesc_sg_dma failed: %d\n", result);
255 return;
256 }
257
258 kgsl_mmu_add_global(device, &gpu_qdss_desc, "gpu-qdss");
259}
260
261static inline void kgsl_cleanup_qdss_desc(struct kgsl_mmu *mmu)
262{
263 kgsl_iommu_remove_global(mmu, &gpu_qdss_desc);
264 kgsl_sharedmem_free(&gpu_qdss_desc);
265}
266
267
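/*
 * On targets with the "qcom,gpu-quirk-iommu-sync" quirk the CPU and GPU must
 * not program the IOMMU at the same time, so attach/detach and map/unmap
 * operations are serialized with the kgsl_mmu_sync mutex.
 */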
268static inline void _iommu_sync_mmu_pc(bool lock)
269{
270 if (need_iommu_sync == false)
271 return;
272
273 if (lock)
274 mutex_lock(&kgsl_mmu_sync);
275 else
276 mutex_unlock(&kgsl_mmu_sync);
277}
278
279static void _detach_pt(struct kgsl_iommu_pt *iommu_pt,
280 struct kgsl_iommu_context *ctx)
281{
282 if (iommu_pt->attached) {
283 _iommu_sync_mmu_pc(true);
284 iommu_detach_device(iommu_pt->domain, ctx->dev);
285 _iommu_sync_mmu_pc(false);
286 iommu_pt->attached = false;
287 }
288}
289
290static int _attach_pt(struct kgsl_iommu_pt *iommu_pt,
291 struct kgsl_iommu_context *ctx)
292{
293 int ret;
294
295 if (iommu_pt->attached)
296 return 0;
297
298 _iommu_sync_mmu_pc(true);
299 ret = iommu_attach_device(iommu_pt->domain, ctx->dev);
300 _iommu_sync_mmu_pc(false);
301
302 if (ret == 0)
303 iommu_pt->attached = true;
304
305 return ret;
306}
307
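/*
 * For secure buffers take the device mutex and an active count so the device
 * stays active while the secure pagetable is modified; returns 0 immediately
 * for non-secure memdescs.
 */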
308static int _lock_if_secure_mmu(struct kgsl_memdesc *memdesc,
309 struct kgsl_mmu *mmu)
310{
311 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
312
313 if (!kgsl_memdesc_is_secured(memdesc))
314 return 0;
315
316 if (!kgsl_mmu_is_secured(mmu))
317 return -EINVAL;
318
319 mutex_lock(&device->mutex);
320 if (kgsl_active_count_get(device)) {
321 mutex_unlock(&device->mutex);
322 return -EINVAL;
323 }
324
325 return 0;
326}
327
328static void _unlock_if_secure_mmu(struct kgsl_memdesc *memdesc,
329 struct kgsl_mmu *mmu)
330{
331 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
332
333 if (!kgsl_memdesc_is_secured(memdesc) || !kgsl_mmu_is_secured(mmu))
334 return;
335
336 kgsl_active_count_put(device);
337 mutex_unlock(&device->mutex);
338}
339
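/*
 * Wrapper around iommu_map() that handles secure buffer locking and the
 * IOMMU programming sync quirk before touching the domain.
 */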
340static int _iommu_map_sync_pc(struct kgsl_pagetable *pt,
341 struct kgsl_memdesc *memdesc,
342 uint64_t gpuaddr, phys_addr_t physaddr,
343 uint64_t size, unsigned int flags)
344{
345 struct kgsl_iommu_pt *iommu_pt = pt->priv;
346 int ret;
347
348 ret = _lock_if_secure_mmu(memdesc, pt->mmu);
349 if (ret)
350 return ret;
351
352 _iommu_sync_mmu_pc(true);
353
354 ret = iommu_map(iommu_pt->domain, gpuaddr, physaddr, size, flags);
355
356 _iommu_sync_mmu_pc(false);
357
358 _unlock_if_secure_mmu(memdesc, pt->mmu);
359
360 if (ret) {
361 KGSL_CORE_ERR("map err: 0x%016llX, 0x%llx, 0x%x, %d\n",
362 gpuaddr, size, flags, ret);
363 return -ENODEV;
364 }
365
366 return 0;
367}
368
369static int _iommu_unmap_sync_pc(struct kgsl_pagetable *pt,
370 struct kgsl_memdesc *memdesc, uint64_t addr, uint64_t size)
371{
372 struct kgsl_iommu_pt *iommu_pt = pt->priv;
373 size_t unmapped = 0;
374 int ret;
375
376 ret = _lock_if_secure_mmu(memdesc, pt->mmu);
377 if (ret)
378 return ret;
379
380 _iommu_sync_mmu_pc(true);
381
382 unmapped = iommu_unmap(iommu_pt->domain, addr, size);
383
384 _iommu_sync_mmu_pc(false);
385
386 _unlock_if_secure_mmu(memdesc, pt->mmu);
387
388 if (unmapped != size) {
389 KGSL_CORE_ERR("unmap err: 0x%016llx, 0x%llx, %zd\n",
390 addr, size, unmapped);
391 return -ENODEV;
392 }
393
394 return 0;
395}
396
397static int _iommu_map_sg_offset_sync_pc(struct kgsl_pagetable *pt,
398 uint64_t addr, struct kgsl_memdesc *memdesc,
399 struct scatterlist *sg, int nents,
400 uint64_t offset, uint64_t size, unsigned int flags)
401{
402 struct kgsl_iommu_pt *iommu_pt = pt->priv;
403 uint64_t offset_tmp = offset;
404 uint64_t size_tmp = size;
405 size_t mapped = 0;
406 unsigned int i;
407 struct scatterlist *s;
408 phys_addr_t physaddr;
409 int ret;
410
411 ret = _lock_if_secure_mmu(memdesc, pt->mmu);
412 if (ret)
413 return ret;
414
415 _iommu_sync_mmu_pc(true);
416
417 for_each_sg(sg, s, nents, i) {
418 /* Iterate until we find the offset */
419 if (offset_tmp >= s->length) {
420 offset_tmp -= s->length;
421 continue;
422 }
423
424 /* How much mapping is needed in this sg? */
425 if (size < s->length - offset_tmp)
426 size_tmp = size;
427 else
428 size_tmp = s->length - offset_tmp;
429
430 /* Get the phys addr for the offset page */
431 if (offset_tmp != 0) {
432 physaddr = page_to_phys(nth_page(sg_page(s),
433 offset_tmp >> PAGE_SHIFT));
434 /* Reset offset_tmp */
435 offset_tmp = 0;
436 } else
437 physaddr = page_to_phys(sg_page(s));
438
439 /* Do the map for this sg */
440 ret = iommu_map(iommu_pt->domain, addr + mapped,
441 physaddr, size_tmp, flags);
442 if (ret)
443 break;
444
445 mapped += size_tmp;
446 size -= size_tmp;
447
448 if (size == 0)
449 break;
450 }
451
452 _iommu_sync_mmu_pc(false);
453
454 _unlock_if_secure_mmu(memdesc, pt->mmu);
455
456 if (size != 0) {
457 /* Cleanup on error */
458 _iommu_unmap_sync_pc(pt, memdesc, addr, mapped);
459 KGSL_CORE_ERR(
460 "map sg offset err: 0x%016llX, %d, %x, %zd\n",
461 addr, nents, flags, mapped);
462 return -ENODEV;
463 }
464
465 return 0;
466}
467
468static int _iommu_map_sg_sync_pc(struct kgsl_pagetable *pt,
469 uint64_t addr, struct kgsl_memdesc *memdesc,
470 struct scatterlist *sg, int nents,
471 unsigned int flags)
472{
473 struct kgsl_iommu_pt *iommu_pt = pt->priv;
474 size_t mapped;
475 int ret;
476
477 ret = _lock_if_secure_mmu(memdesc, pt->mmu);
478 if (ret)
479 return ret;
480
481 _iommu_sync_mmu_pc(true);
482
483 mapped = iommu_map_sg(iommu_pt->domain, addr, sg, nents, flags);
484
485 _iommu_sync_mmu_pc(false);
486
487 _unlock_if_secure_mmu(memdesc, pt->mmu);
488
489 if (mapped == 0) {
490 KGSL_CORE_ERR("map sg err: 0x%016llX, %d, %x, %zd\n",
491 addr, nents, flags, mapped);
492 return -ENODEV;
493 }
494
495 return 0;
496}
497
498/*
499 * One page allocation for a guard region to protect against over-zealous
500 * GPU pre-fetch
501 */
502
503static struct page *kgsl_guard_page;
504static struct kgsl_memdesc kgsl_secure_guard_page_memdesc;
505
506/*
507 * The dummy page is a placeholder/extra page to be used for sparse mappings.
508 * This page will be mapped to all virtual sparse bindings that are not
509 * physically backed.
510 */
511static struct page *kgsl_dummy_page;
512
513/* These functions help find the nearest allocated memory entries on either side
 514 * of a faulting address. If we know the nearby allocated memory we can
 515 * make a better determination of what we think should have been located
 516 * in the faulting region.
517 */
518
519/*
520 * A local structure to make it easy to store the interesting bits for the
521 * memory entries on either side of the faulting address
522 */
523
524struct _mem_entry {
525 uint64_t gpuaddr;
526 uint64_t size;
527 uint64_t flags;
528 unsigned int priv;
529 int pending_free;
530 pid_t pid;
531 char name[32];
532};
533
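/* Find the global entries closest to faultaddr on either side */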
534static void _get_global_entries(uint64_t faultaddr,
535 struct _mem_entry *prev,
536 struct _mem_entry *next)
537{
538 int i;
539 uint64_t prevaddr = 0;
540 struct global_pt_entry *p = NULL;
541
542 uint64_t nextaddr = (uint64_t) -1;
543 struct global_pt_entry *n = NULL;
544
545 for (i = 0; i < global_pt_count; i++) {
546 uint64_t addr;
547
548 if (global_pt_entries[i].memdesc == NULL)
549 continue;
550
551 addr = global_pt_entries[i].memdesc->gpuaddr;
552 if ((addr < faultaddr) && (addr > prevaddr)) {
553 prevaddr = addr;
554 p = &global_pt_entries[i];
555 }
556
557 if ((addr > faultaddr) && (addr < nextaddr)) {
558 nextaddr = addr;
559 n = &global_pt_entries[i];
560 }
561 }
562
563 if (p != NULL) {
564 prev->gpuaddr = p->memdesc->gpuaddr;
565 prev->size = p->memdesc->size;
566 prev->flags = p->memdesc->flags;
567 prev->priv = p->memdesc->priv;
568 prev->pid = 0;
569 strlcpy(prev->name, p->name, sizeof(prev->name));
570 }
571
572 if (n != NULL) {
573 next->gpuaddr = n->memdesc->gpuaddr;
574 next->size = n->memdesc->size;
575 next->flags = n->memdesc->flags;
576 next->priv = n->memdesc->priv;
577 next->pid = 0;
578 strlcpy(next->name, n->name, sizeof(next->name));
579 }
580}
581
582void __kgsl_get_memory_usage(struct _mem_entry *entry)
583{
584 kgsl_get_memory_usage(entry->name, sizeof(entry->name), entry->flags);
585}
586
587static void _get_entries(struct kgsl_process_private *private,
588 uint64_t faultaddr, struct _mem_entry *prev,
589 struct _mem_entry *next)
590{
591 int id;
592 struct kgsl_mem_entry *entry;
593
594 uint64_t prevaddr = 0;
595 struct kgsl_mem_entry *p = NULL;
596
597 uint64_t nextaddr = (uint64_t) -1;
598 struct kgsl_mem_entry *n = NULL;
599
600 idr_for_each_entry(&private->mem_idr, entry, id) {
601 uint64_t addr = entry->memdesc.gpuaddr;
602
603 if ((addr < faultaddr) && (addr > prevaddr)) {
604 prevaddr = addr;
605 p = entry;
606 }
607
608 if ((addr > faultaddr) && (addr < nextaddr)) {
609 nextaddr = addr;
610 n = entry;
611 }
612 }
613
614 if (p != NULL) {
615 prev->gpuaddr = p->memdesc.gpuaddr;
616 prev->size = p->memdesc.size;
617 prev->flags = p->memdesc.flags;
618 prev->priv = p->memdesc.priv;
619 prev->pending_free = p->pending_free;
620 prev->pid = private->pid;
621 __kgsl_get_memory_usage(prev);
622 }
623
624 if (n != NULL) {
625 next->gpuaddr = n->memdesc.gpuaddr;
626 next->size = n->memdesc.size;
627 next->flags = n->memdesc.flags;
628 next->priv = n->memdesc.priv;
629 next->pending_free = n->pending_free;
630 next->pid = private->pid;
631 __kgsl_get_memory_usage(next);
632 }
633}
634
635static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
636 struct _mem_entry *preventry, struct _mem_entry *nextentry,
637 struct kgsl_context *context)
638{
639 struct kgsl_process_private *private;
640
641 memset(preventry, 0, sizeof(*preventry));
642 memset(nextentry, 0, sizeof(*nextentry));
643
644 /* Set the maximum possible size as an initial value */
645 nextentry->gpuaddr = (uint64_t) -1;
646
647 if (ADDR_IN_GLOBAL(faultaddr)) {
648 _get_global_entries(faultaddr, preventry, nextentry);
649 } else if (context) {
650 private = context->proc_priv;
651 spin_lock(&private->mem_lock);
652 _get_entries(private, faultaddr, preventry, nextentry);
653 spin_unlock(&private->mem_lock);
654 }
655}
656
657static void _print_entry(struct kgsl_device *device, struct _mem_entry *entry)
658{
659 KGSL_LOG_DUMP(device,
660 "[%016llX - %016llX] %s %s (pid = %d) (%s)\n",
661 entry->gpuaddr,
662 entry->gpuaddr + entry->size,
663 entry->priv & KGSL_MEMDESC_GUARD_PAGE ? "(+guard)" : "",
664 entry->pending_free ? "(pending free)" : "",
665 entry->pid, entry->name);
666}
667
668static void _check_if_freed(struct kgsl_iommu_context *ctx,
669 uint64_t addr, pid_t ptname)
670{
671 uint64_t gpuaddr = addr;
672 uint64_t size = 0;
673 uint64_t flags = 0;
674 pid_t pid;
675
676 char name[32];
677
678 memset(name, 0, sizeof(name));
679
680 if (kgsl_memfree_find_entry(ptname, &gpuaddr, &size, &flags, &pid)) {
681 kgsl_get_memory_usage(name, sizeof(name) - 1, flags);
682 KGSL_LOG_DUMP(ctx->kgsldev, "---- premature free ----\n");
683 KGSL_LOG_DUMP(ctx->kgsldev,
684 "[%8.8llX-%8.8llX] (%s) was already freed by pid %d\n",
685 gpuaddr, gpuaddr + size, name, pid);
686 }
687}
688
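/*
 * Return true if the faulting address falls within 64 bytes past the end of
 * any mapping owned by this process, i.e. a likely UCHE overfetch.
 */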
689static bool
690kgsl_iommu_uche_overfetch(struct kgsl_process_private *private,
691 uint64_t faultaddr)
692{
693 int id;
694 struct kgsl_mem_entry *entry = NULL;
695
696 spin_lock(&private->mem_lock);
697 idr_for_each_entry(&private->mem_idr, entry, id) {
698 struct kgsl_memdesc *m = &entry->memdesc;
699
700 if ((faultaddr >= (m->gpuaddr + m->size))
701 && (faultaddr < (m->gpuaddr + m->size + 64))) {
702 spin_unlock(&private->mem_lock);
703 return true;
704 }
705 }
706 spin_unlock(&private->mem_lock);
707 return false;
708}
709
710/*
711 * Read pagefaults where the faulting address lies within the first 64 bytes
712 * of a page (UCHE line size is 64 bytes) and the fault page is preceded by a
713 * valid allocation are considered likely due to UCHE overfetch and suppressed.
714 */
715
716static bool kgsl_iommu_suppress_pagefault(uint64_t faultaddr, int write,
717 struct kgsl_context *context)
718{
719 /*
720 * If there is no context associated with the pagefault then this
721 * could be a fault on a global buffer. We do not suppress faults
722 * on global buffers as they are mainly accessed by the CP bypassing
723 * the UCHE. Also, write pagefaults are never suppressed.
724 */
725 if (!context || write)
726 return false;
727
728 return kgsl_iommu_uche_overfetch(context->proc_priv, faultaddr);
729}
730
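/*
 * Pagefault handler registered with iommu_set_fault_handler(). Logs the
 * faulting context and nearby allocations and, if GPUHALT recovery is
 * enabled, keeps the transaction stalled and schedules the dispatcher to
 * recover.
 */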
731static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
732 struct device *dev, unsigned long addr, int flags, void *token)
733{
734 int ret = 0;
735 struct kgsl_pagetable *pt = token;
736 struct kgsl_mmu *mmu = pt->mmu;
737 struct kgsl_iommu *iommu;
738 struct kgsl_iommu_context *ctx;
739 u64 ptbase;
740 u32 contextidr;
741 pid_t tid = 0;
742 pid_t ptname;
743 struct _mem_entry prev, next;
744 int write;
745 struct kgsl_device *device;
746 struct adreno_device *adreno_dev;
747 unsigned int no_page_fault_log = 0;
748 unsigned int curr_context_id = 0;
749 struct kgsl_context *context;
750 char *fault_type = "unknown";
751
752 static DEFINE_RATELIMIT_STATE(_rs,
753 DEFAULT_RATELIMIT_INTERVAL,
754 DEFAULT_RATELIMIT_BURST);
755
756 if (mmu == NULL)
757 return ret;
758
759 iommu = _IOMMU_PRIV(mmu);
760 ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
761 device = KGSL_MMU_DEVICE(mmu);
762 adreno_dev = ADRENO_DEVICE(device);
763
764 if (pt->name == KGSL_MMU_SECURE_PT)
765 ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
766
767 /*
 768 * Set the fault bits before any printks so that if the fault
 769 * handler runs it will know it is dealing with a pagefault.
770 * Read the global current timestamp because we could be in middle of
771 * RB switch and hence the cur RB may not be reliable but global
772 * one will always be reliable
773 */
774 kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
775 KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
776
777 context = kgsl_context_get(device, curr_context_id);
778
779 write = (flags & IOMMU_FAULT_WRITE) ? 1 : 0;
780 if (flags & IOMMU_FAULT_TRANSLATION)
781 fault_type = "translation";
782 else if (flags & IOMMU_FAULT_PERMISSION)
783 fault_type = "permission";
784
785 if (kgsl_iommu_suppress_pagefault(addr, write, context)) {
786 iommu->pagefault_suppression_count++;
787 kgsl_context_put(context);
788 return ret;
789 }
790
791 if (context != NULL) {
792 /* save pagefault timestamp for GFT */
793 set_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, &context->priv);
794 tid = context->tid;
795 }
796
797 ctx->fault = 1;
798
799 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
800 &adreno_dev->ft_pf_policy) &&
801 (flags & IOMMU_FAULT_TRANSACTION_STALLED)) {
802 /*
803 * Turn off GPU IRQ so we don't get faults from it too.
804 * The device mutex must be held to change power state
805 */
806 mutex_lock(&device->mutex);
807 kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
808 mutex_unlock(&device->mutex);
809 }
810
811 ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
812 contextidr = KGSL_IOMMU_GET_CTX_REG(ctx, CONTEXTIDR);
813
814 ptname = MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ?
815 KGSL_MMU_GLOBAL_PT : tid;
 816 /*
817 * Trace needs to be logged before searching the faulting
818 * address in free list as it takes quite long time in
819 * search and delays the trace unnecessarily.
820 */
821 trace_kgsl_mmu_pagefault(ctx->kgsldev, addr,
822 ptname, write ? "write" : "read");
 823
824 if (test_bit(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE,
825 &adreno_dev->ft_pf_policy))
826 no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
827
828 if (!no_page_fault_log && __ratelimit(&_rs)) {
829 KGSL_MEM_CRIT(ctx->kgsldev,
830 "GPU PAGE FAULT: addr = %lX pid= %d\n", addr, ptname);
831 KGSL_MEM_CRIT(ctx->kgsldev,
832 "context=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
833 ctx->name, ptbase, contextidr,
834 write ? "write" : "read", fault_type);
835
836 /* Don't print the debug if this is a permissions fault */
837 if (!(flags & IOMMU_FAULT_PERMISSION)) {
838 _check_if_freed(ctx, addr, ptname);
839
840 KGSL_LOG_DUMP(ctx->kgsldev,
841 "---- nearby memory ----\n");
842
843 _find_mem_entries(mmu, addr, &prev, &next, context);
844 if (prev.gpuaddr)
845 _print_entry(ctx->kgsldev, &prev);
846 else
847 KGSL_LOG_DUMP(ctx->kgsldev, "*EMPTY*\n");
848
849 KGSL_LOG_DUMP(ctx->kgsldev, " <- fault @ %8.8lX\n",
850 addr);
851
852 if (next.gpuaddr != (uint64_t) -1)
853 _print_entry(ctx->kgsldev, &next);
854 else
855 KGSL_LOG_DUMP(ctx->kgsldev, "*EMPTY*\n");
856 }
857 }
858
 859
860 /*
861 * We do not want the h/w to resume fetching data from an iommu
862 * that has faulted, this is better for debugging as it will stall
863 * the GPU and trigger a snapshot. Return EBUSY error.
864 */
865 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
866 &adreno_dev->ft_pf_policy) &&
867 (flags & IOMMU_FAULT_TRANSACTION_STALLED)) {
868 uint32_t sctlr_val;
869
870 ret = -EBUSY;
871 /*
872 * Disable context fault interrupts
873 * as we do not clear FSR in the ISR.
874 * Will be re-enabled after FSR is cleared.
875 */
876 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
877 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
878 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
879
880 adreno_set_gpu_fault(adreno_dev, ADRENO_IOMMU_PAGE_FAULT);
 881 /* Go ahead with recovery */
882 adreno_dispatcher_schedule(device);
883 }
884
885 kgsl_context_put(context);
886 return ret;
887}
888
889/*
 890 * kgsl_iommu_disable_clk() - Disable IOMMU clocks
 891 * @mmu: Pointer to mmu structure
892 */
893static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
894{
895 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
896 int j;
897
898 atomic_dec(&iommu->clk_enable_count);
899
900 /*
 901 * Make sure the clk refcounts are good. An imbalance may
902 * cause the clocks to be off when we need them on.
903 */
904 WARN_ON(atomic_read(&iommu->clk_enable_count) < 0);
905
906 for (j = (KGSL_IOMMU_MAX_CLKS - 1); j >= 0; j--)
907 if (iommu->clks[j])
908 clk_disable_unprepare(iommu->clks[j]);
909}
910
911/*
 912 * kgsl_iommu_clk_prepare_enable - Enable the specified IOMMU clock
913 * Try 4 times to enable it and then BUG() for debug
914 */
915static void kgsl_iommu_clk_prepare_enable(struct clk *clk)
916{
917 int num_retries = 4;
918
919 while (num_retries--) {
920 if (!clk_prepare_enable(clk))
921 return;
922 }
923
924 /* Failure is fatal so BUG() to facilitate debug */
925 KGSL_CORE_ERR("IOMMU clock enable failed\n");
926 BUG();
927}
928
929/*
930 * kgsl_iommu_enable_clk - Enable iommu clocks
931 * Enable all the IOMMU clocks
932 */
933static void kgsl_iommu_enable_clk(struct kgsl_mmu *mmu)
934{
935 int j;
936 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
937
938 for (j = 0; j < KGSL_IOMMU_MAX_CLKS; j++) {
939 if (iommu->clks[j])
940 kgsl_iommu_clk_prepare_enable(iommu->clks[j]);
941 }
942 atomic_inc(&iommu->clk_enable_count);
943}
944
945/* kgsl_iommu_get_ttbr0 - Get TTBR0 setting for a pagetable */
946static u64 kgsl_iommu_get_ttbr0(struct kgsl_pagetable *pt)
947{
948 struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
949
950 BUG_ON(iommu_pt == NULL);
951
952 return iommu_pt->ttbr0;
953}
954
955static bool kgsl_iommu_pt_equal(struct kgsl_mmu *mmu,
956 struct kgsl_pagetable *pt,
957 u64 ttbr0)
958{
959 struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
960 u64 domain_ttbr0;
961
962 if (iommu_pt == NULL)
963 return 0;
964
965 domain_ttbr0 = kgsl_iommu_get_ttbr0(pt);
966
967 return (domain_ttbr0 == ttbr0);
968}
969
970/* kgsl_iommu_get_contextidr - query CONTEXTIDR setting for a pagetable */
971static u32 kgsl_iommu_get_contextidr(struct kgsl_pagetable *pt)
972{
973 struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
974
975 BUG_ON(iommu_pt == NULL);
976
977 return iommu_pt->contextidr;
978}
979
980/*
 981 * kgsl_iommu_destroy_pagetable - Free up resources held by a pagetable
 982 * @pt - Pointer to the pagetable which is to be freed
983 *
984 * Return - void
985 */
986static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)
987{
988 struct kgsl_iommu_pt *iommu_pt = pt->priv;
989 struct kgsl_mmu *mmu = pt->mmu;
990 struct kgsl_iommu *iommu;
991 struct kgsl_iommu_context *ctx;
992
993 /*
994 * Make sure all allocations are unmapped before destroying
995 * the pagetable
996 */
997 WARN_ON(!list_empty(&pt->list));
998
999 iommu = _IOMMU_PRIV(mmu);
1000
1001 if (pt->name == KGSL_MMU_SECURE_PT) {
1002 ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
1003 kgsl_iommu_unmap_global_secure_pt_entry(pt);
1004 } else {
1005 ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1006 kgsl_iommu_unmap_globals(pt);
1007 }
1008
1009 if (iommu_pt->domain) {
1010 trace_kgsl_pagetable_destroy(iommu_pt->ttbr0, pt->name);
1011
1012 _detach_pt(iommu_pt, ctx);
1013
1014 iommu_domain_free(iommu_pt->domain);
1015 }
1016
1017 kfree(iommu_pt);
1018}
1019
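/*
 * Pick the virtual address ranges for a 64-bit pagetable: the secure
 * pagetable covers the secure region while all others use the compat SVM
 * window and the 64-bit VA window.
 */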
1020static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
1021 struct kgsl_pagetable *pagetable,
1022 struct kgsl_iommu_pt *pt)
1023{
1024 unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ?
1025 kgsl_global_secure_pt_entry->size : 0;
1026 if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
1027 pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
1028 secure_global_size;
1029 pt->compat_va_end = KGSL_IOMMU_SECURE_END;
1030 pt->va_start = KGSL_IOMMU_SECURE_BASE + secure_global_size;
1031 pt->va_end = KGSL_IOMMU_SECURE_END;
1032 } else {
1033 pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
1034 pt->compat_va_end = KGSL_IOMMU_SVM_END32;
1035 pt->va_start = KGSL_IOMMU_VA_BASE64;
1036 pt->va_end = KGSL_IOMMU_VA_END64;
1037 }
1038
1039 if (pagetable->name != KGSL_MMU_GLOBAL_PT &&
1040 pagetable->name != KGSL_MMU_SECURE_PT) {
1041 if ((BITS_PER_LONG == 32) || is_compat_task()) {
1042 pt->svm_start = KGSL_IOMMU_SVM_BASE32;
1043 pt->svm_end = KGSL_IOMMU_SVM_END32;
1044 } else {
1045 pt->svm_start = KGSL_IOMMU_SVM_BASE64;
1046 pt->svm_end = KGSL_IOMMU_SVM_END64;
1047 }
1048 }
1049}
1050
1051static void setup_32bit_pagetable(struct kgsl_mmu *mmu,
1052 struct kgsl_pagetable *pagetable,
1053 struct kgsl_iommu_pt *pt)
1054{
1055 unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ?
1056 kgsl_global_secure_pt_entry->size : 0;
1057 if (mmu->secured) {
1058 if (pagetable->name == KGSL_MMU_SECURE_PT) {
1059 pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
1060 secure_global_size;
1061 pt->compat_va_end = KGSL_IOMMU_SECURE_END;
1062 pt->va_start = KGSL_IOMMU_SECURE_BASE +
1063 secure_global_size;
1064 pt->va_end = KGSL_IOMMU_SECURE_END;
1065 } else {
1066 pt->va_start = KGSL_IOMMU_SVM_BASE32;
1067 pt->va_end = KGSL_IOMMU_SECURE_BASE +
1068 secure_global_size;
1069 pt->compat_va_start = pt->va_start;
1070 pt->compat_va_end = pt->va_end;
1071 }
1072 } else {
1073 pt->va_start = KGSL_IOMMU_SVM_BASE32;
1074 pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE;
1075 pt->compat_va_start = pt->va_start;
1076 pt->compat_va_end = pt->va_end;
1077 }
1078
1079 if (pagetable->name != KGSL_MMU_GLOBAL_PT &&
1080 pagetable->name != KGSL_MMU_SECURE_PT) {
1081 pt->svm_start = KGSL_IOMMU_SVM_BASE32;
1082 pt->svm_end = KGSL_IOMMU_SVM_END32;
1083 }
1084}
1085
1086
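/*
 * Allocate an iommu domain plus the kgsl_iommu_pt wrapper for a pagetable
 * and set up its virtual address ranges; the caller attaches it to a
 * context bank.
 */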
1087static struct kgsl_iommu_pt *
1088_alloc_pt(struct device *dev, struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1089{
1090 struct kgsl_iommu_pt *iommu_pt;
1091 struct bus_type *bus = kgsl_mmu_get_bus(dev);
1092
1093 if (bus == NULL)
1094 return ERR_PTR(-ENODEV);
1095
1096 iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
1097 if (iommu_pt == NULL)
1098 return ERR_PTR(-ENOMEM);
1099
1100 iommu_pt->domain = iommu_domain_alloc(bus);
1101 if (iommu_pt->domain == NULL) {
1102 kfree(iommu_pt);
1103 return ERR_PTR(-ENODEV);
1104 }
1105
1106 pt->pt_ops = &iommu_pt_ops;
1107 pt->priv = iommu_pt;
1108 pt->fault_addr = ~0ULL;
1109 iommu_pt->rbtree = RB_ROOT;
1110
1111 if (MMU_FEATURE(mmu, KGSL_MMU_64BIT))
1112 setup_64bit_pagetable(mmu, pt, iommu_pt);
1113 else
1114 setup_32bit_pagetable(mmu, pt, iommu_pt);
1115
1116
1117 return iommu_pt;
1118}
1119
1120static void _free_pt(struct kgsl_iommu_context *ctx, struct kgsl_pagetable *pt)
1121{
1122 struct kgsl_iommu_pt *iommu_pt = pt->priv;
1123
1124 pt->pt_ops = NULL;
1125 pt->priv = NULL;
1126
1127 if (iommu_pt == NULL)
1128 return;
1129
1130 _detach_pt(iommu_pt, ctx);
1131
1132 if (iommu_pt->domain != NULL)
1133 iommu_domain_free(iommu_pt->domain);
1134 kfree(iommu_pt);
1135}
1136
1137static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1138{
1139 int ret = 0;
1140 struct kgsl_iommu_pt *iommu_pt = NULL;
1141 unsigned int cb_num;
1142 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1143 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1144
1145 iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
1146
1147 if (IS_ERR(iommu_pt))
1148 return PTR_ERR(iommu_pt);
1149
1150 if (kgsl_mmu_is_perprocess(mmu)) {
1151 ret = iommu_domain_set_attr(iommu_pt->domain,
1152 DOMAIN_ATTR_PROCID, &pt->name);
1153 if (ret) {
1154 KGSL_CORE_ERR("set DOMAIN_ATTR_PROCID failed: %d\n",
1155 ret);
1156 goto done;
1157 }
1158 }
1159
1160 ret = _attach_pt(iommu_pt, ctx);
1161 if (ret)
1162 goto done;
1163
1164 iommu_set_fault_handler(iommu_pt->domain,
1165 kgsl_iommu_fault_handler, pt);
1166
1167 ret = iommu_domain_get_attr(iommu_pt->domain,
1168 DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
1169 if (ret) {
 1170 KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXT_BANK failed: %d\n",
1171 ret);
1172 goto done;
1173 }
1174
1175 ctx->cb_num = cb_num;
1176 ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
1177 + (cb_num << KGSL_IOMMU_CB_SHIFT);
1178
1179 ret = iommu_domain_get_attr(iommu_pt->domain,
1180 DOMAIN_ATTR_TTBR0, &iommu_pt->ttbr0);
1181 if (ret) {
1182 KGSL_CORE_ERR("get DOMAIN_ATTR_TTBR0 failed: %d\n",
1183 ret);
1184 goto done;
1185 }
1186 ret = iommu_domain_get_attr(iommu_pt->domain,
1187 DOMAIN_ATTR_CONTEXTIDR, &iommu_pt->contextidr);
1188 if (ret) {
1189 KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXTIDR failed: %d\n",
1190 ret);
1191 goto done;
1192 }
1193
1194 ret = kgsl_iommu_map_globals(pt);
1195
1196done:
1197 if (ret)
1198 _free_pt(ctx, pt);
1199
1200 return ret;
1201}
1202
1203static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1204{
1205 int ret = 0;
1206 struct kgsl_iommu_pt *iommu_pt = NULL;
1207 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1208 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
1209 int secure_vmid = VMID_CP_PIXEL;
1210 unsigned int cb_num;
1211
1212 if (!mmu->secured)
1213 return -EPERM;
1214
1215 if (!MMU_FEATURE(mmu, KGSL_MMU_HYP_SECURE_ALLOC)) {
1216 if (!kgsl_mmu_bus_secured(ctx->dev))
1217 return -EPERM;
1218 }
1219
1220 iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
1221
1222 if (IS_ERR(iommu_pt))
1223 return PTR_ERR(iommu_pt);
1224
1225 ret = iommu_domain_set_attr(iommu_pt->domain,
1226 DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
1227 if (ret) {
1228 KGSL_CORE_ERR("set DOMAIN_ATTR_SECURE_VMID failed: %d\n", ret);
1229 goto done;
1230 }
1231
1232 ret = _attach_pt(iommu_pt, ctx);
1233
1234 if (MMU_FEATURE(mmu, KGSL_MMU_HYP_SECURE_ALLOC))
1235 iommu_set_fault_handler(iommu_pt->domain,
1236 kgsl_iommu_fault_handler, pt);
1237
1238 ret = iommu_domain_get_attr(iommu_pt->domain,
1239 DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
1240 if (ret) {
 1241 KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXT_BANK failed: %d\n",
1242 ret);
1243 goto done;
1244 }
1245
1246 ctx->cb_num = cb_num;
1247 ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
1248 + (cb_num << KGSL_IOMMU_CB_SHIFT);
1249
1250 ret = kgsl_map_global_secure_pt_entry(pt);
1251
1252done:
1253 if (ret)
1254 _free_pt(ctx, pt);
1255 return ret;
1256}
1257
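/*
 * Per-process pagetables are dynamic domains that share the user context
 * bank; TTBR0 and CONTEXTIDR are read back so the driver can switch
 * pagetables at runtime.
 */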
1258static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1259{
1260 int ret = 0;
1261 struct kgsl_iommu_pt *iommu_pt = NULL;
1262 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1263 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1264 int dynamic = 1;
1265 unsigned int cb_num = ctx->cb_num;
1266
1267 iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
1268
1269 if (IS_ERR(iommu_pt))
1270 return PTR_ERR(iommu_pt);
1271
1272 ret = iommu_domain_set_attr(iommu_pt->domain,
1273 DOMAIN_ATTR_DYNAMIC, &dynamic);
1274 if (ret) {
1275 KGSL_CORE_ERR("set DOMAIN_ATTR_DYNAMIC failed: %d\n", ret);
1276 goto done;
1277 }
1278 ret = iommu_domain_set_attr(iommu_pt->domain,
1279 DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
1280 if (ret) {
1281 KGSL_CORE_ERR("set DOMAIN_ATTR_CONTEXT_BANK failed: %d\n", ret);
1282 goto done;
1283 }
1284
1285 ret = iommu_domain_set_attr(iommu_pt->domain,
1286 DOMAIN_ATTR_PROCID, &pt->name);
1287 if (ret) {
1288 KGSL_CORE_ERR("set DOMAIN_ATTR_PROCID failed: %d\n", ret);
1289 goto done;
1290 }
1291
1292 ret = _attach_pt(iommu_pt, ctx);
1293 if (ret)
1294 goto done;
1295
1296 /* now read back the attributes needed for self programming */
1297 ret = iommu_domain_get_attr(iommu_pt->domain,
1298 DOMAIN_ATTR_TTBR0, &iommu_pt->ttbr0);
1299 if (ret) {
1300 KGSL_CORE_ERR("get DOMAIN_ATTR_TTBR0 failed: %d\n", ret);
1301 goto done;
1302 }
1303
1304 ret = iommu_domain_get_attr(iommu_pt->domain,
1305 DOMAIN_ATTR_CONTEXTIDR, &iommu_pt->contextidr);
1306 if (ret) {
1307 KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXTIDR failed: %d\n", ret);
1308 goto done;
1309 }
1310
1311 ret = kgsl_iommu_map_globals(pt);
1312
1313done:
1314 if (ret)
1315 _free_pt(ctx, pt);
1316
1317 return ret;
1318}
1319
1320/* kgsl_iommu_init_pt - Set up an IOMMU pagetable */
1321static int kgsl_iommu_init_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1322{
1323 if (pt == NULL)
1324 return -EINVAL;
1325
1326 switch (pt->name) {
1327 case KGSL_MMU_GLOBAL_PT:
1328 return _init_global_pt(mmu, pt);
1329
1330 case KGSL_MMU_SECURE_PT:
1331 return _init_secure_pt(mmu, pt);
1332
1333 default:
1334 return _init_per_process_pt(mmu, pt);
1335 }
1336}
1337
1338static struct kgsl_pagetable *kgsl_iommu_getpagetable(struct kgsl_mmu *mmu,
1339 unsigned long name)
1340{
1341 struct kgsl_pagetable *pt;
1342
1343 if (!kgsl_mmu_is_perprocess(mmu) && (name != KGSL_MMU_SECURE_PT)) {
1344 name = KGSL_MMU_GLOBAL_PT;
1345 if (mmu->defaultpagetable != NULL)
1346 return mmu->defaultpagetable;
1347 }
1348
1349 pt = kgsl_get_pagetable(name);
1350 if (pt == NULL)
1351 pt = kgsl_mmu_createpagetableobject(mmu, name);
1352
1353 return pt;
1354}
1355
1356/*
1357 * kgsl_iommu_get_reg_ahbaddr - Returns the ahb address of the register
1358 * @mmu - Pointer to mmu structure
1359 * @id - The context ID of the IOMMU ctx
1360 * @reg - The register for which address is required
1361 *
1362 * Return - The address of register which can be used in type0 packet
1363 */
1364static unsigned int kgsl_iommu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
1365 int id, unsigned int reg)
1366{
1367 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1368 struct kgsl_iommu_context *ctx = &iommu->ctx[id];
1369
1370 return ctx->gpu_offset + kgsl_iommu_reg_list[reg];
1371}
1372
1373static void _detach_context(struct kgsl_iommu_context *ctx)
1374{
1375 struct kgsl_iommu_pt *iommu_pt;
1376
1377 if (ctx->default_pt == NULL)
1378 return;
1379
1380 iommu_pt = ctx->default_pt->priv;
1381
1382 _detach_pt(iommu_pt, ctx);
1383
1384 ctx->default_pt = NULL;
1385}
1386
1387static void kgsl_iommu_close(struct kgsl_mmu *mmu)
1388{
1389 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1390 int i;
1391
1392 for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
1393 _detach_context(&iommu->ctx[i]);
1394
1395 kgsl_mmu_putpagetable(mmu->defaultpagetable);
1396 mmu->defaultpagetable = NULL;
1397
1398 kgsl_mmu_putpagetable(mmu->securepagetable);
1399 mmu->securepagetable = NULL;
1400
1401 if (iommu->regbase != NULL)
1402 iounmap(iommu->regbase);
1403
1404 kgsl_sharedmem_free(&kgsl_secure_guard_page_memdesc);
1405
1406 if (kgsl_guard_page != NULL) {
1407 __free_page(kgsl_guard_page);
1408 kgsl_guard_page = NULL;
1409 }
1410
1411 if (kgsl_dummy_page != NULL) {
1412 __free_page(kgsl_dummy_page);
1413 kgsl_dummy_page = NULL;
1414 }
1415
1416 kgsl_iommu_remove_global(mmu, &iommu->setstate);
1417 kgsl_sharedmem_free(&iommu->setstate);
1418 kgsl_cleanup_qdss_desc(mmu);
1419}
1420
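/* Allocate a zeroed, GPU read-only page of contiguous memory for setstate */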
1421static int _setstate_alloc(struct kgsl_device *device,
1422 struct kgsl_iommu *iommu)
1423{
1424 int ret;
1425
1426 ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, PAGE_SIZE);
1427
1428 if (!ret) {
1429 /* Mark the setstate memory as read only */
1430 iommu->setstate.flags |= KGSL_MEMFLAGS_GPUREADONLY;
1431
1432 kgsl_sharedmem_set(device, &iommu->setstate, 0, 0, PAGE_SIZE);
1433 }
1434
1435 return ret;
1436}
1437
1438static int kgsl_iommu_init(struct kgsl_mmu *mmu)
1439{
1440 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
1441 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1442 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1443 int status;
1444
1445 mmu->features |= KGSL_MMU_PAGED;
1446
1447 if (ctx->name == NULL) {
1448 KGSL_CORE_ERR("dt: gfx3d0_user context bank not found\n");
1449 return -EINVAL;
1450 }
1451
1452 status = _setstate_alloc(device, iommu);
1453 if (status)
1454 return status;
1455
1456 /* check requirements for per process pagetables */
1457 if (ctx->gpu_offset == UINT_MAX) {
1458 KGSL_CORE_ERR("missing qcom,gpu-offset forces global pt\n");
1459 mmu->features |= KGSL_MMU_GLOBAL_PAGETABLE;
1460 }
1461
1462 if (iommu->version == 1 && iommu->micro_mmu_ctrl == UINT_MAX) {
1463 KGSL_CORE_ERR(
1464 "missing qcom,micro-mmu-control forces global pt\n");
1465 mmu->features |= KGSL_MMU_GLOBAL_PAGETABLE;
1466 }
1467
1468 /* Check to see if we need to do the IOMMU sync dance */
1469 need_iommu_sync = of_property_read_bool(device->pdev->dev.of_node,
1470 "qcom,gpu-quirk-iommu-sync");
1471
1472 iommu->regbase = ioremap(iommu->regstart, iommu->regsize);
1473 if (iommu->regbase == NULL) {
1474 KGSL_CORE_ERR("Could not map IOMMU registers 0x%lx:0x%x\n",
1475 iommu->regstart, iommu->regsize);
1476 status = -ENOMEM;
1477 goto done;
1478 }
1479
1480 if (addr_entry_cache == NULL) {
1481 addr_entry_cache = KMEM_CACHE(kgsl_iommu_addr_entry, 0);
1482 if (addr_entry_cache == NULL) {
1483 status = -ENOMEM;
1484 goto done;
1485 }
1486 }
1487
1488 kgsl_iommu_add_global(mmu, &iommu->setstate, "setstate");
1489 kgsl_setup_qdss_desc(device);
1490
1491done:
1492 if (status)
1493 kgsl_iommu_close(mmu);
1494
1495 return status;
1496}
1497
1498static int _setup_user_context(struct kgsl_mmu *mmu)
1499{
1500 int ret = 0;
1501 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1502 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1503 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
1504 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1505 struct kgsl_iommu_pt *iommu_pt = NULL;
1506 unsigned int sctlr_val;
1507
1508 if (mmu->defaultpagetable == NULL) {
1509 mmu->defaultpagetable = kgsl_mmu_getpagetable(mmu,
1510 KGSL_MMU_GLOBAL_PT);
1511 /* if we don't have a default pagetable, nothing will work */
1512 if (IS_ERR(mmu->defaultpagetable)) {
1513 ret = PTR_ERR(mmu->defaultpagetable);
1514 mmu->defaultpagetable = NULL;
1515 return ret;
1516 }
1517 }
1518
1519 iommu_pt = mmu->defaultpagetable->priv;
1520 if (iommu_pt == NULL)
1521 return -ENODEV;
1522
1523 ret = _attach_pt(iommu_pt, ctx);
1524 if (ret)
1525 return ret;
1526
1527 ctx->default_pt = mmu->defaultpagetable;
1528
1529 kgsl_iommu_enable_clk(mmu);
1530
1531 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
1532
1533 /*
1534 * If pagefault policy is GPUHALT_ENABLE,
1535 * 1) Program CFCFG to 1 to enable STALL mode
1536 * 2) Program HUPCF to 0 (Stall or terminate subsequent
1537 * transactions in the presence of an outstanding fault)
1538 * else
1539 * 1) Program CFCFG to 0 to disable STALL mode (0=Terminate)
1540 * 2) Program HUPCF to 1 (Process subsequent transactions
1541 * independently of any outstanding fault)
1542 */
1543
1544 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
1545 &adreno_dev->ft_pf_policy)) {
1546 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
1547 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
1548 } else {
1549 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
1550 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
1551 }
1552 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
1553 kgsl_iommu_disable_clk(mmu);
1554
1555 return 0;
1556}
1557
1558static int _setup_secure_context(struct kgsl_mmu *mmu)
1559{
1560 int ret;
1561 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1562 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
1563 unsigned int cb_num;
1564
1565 struct kgsl_iommu_pt *iommu_pt;
1566
1567 if (ctx->dev == NULL || !mmu->secured)
1568 return 0;
1569
1570 if (mmu->securepagetable == NULL) {
1571 mmu->securepagetable = kgsl_mmu_getpagetable(mmu,
1572 KGSL_MMU_SECURE_PT);
1573 if (IS_ERR(mmu->securepagetable)) {
1574 ret = PTR_ERR(mmu->securepagetable);
1575 mmu->securepagetable = NULL;
1576 return ret;
1577 } else if (mmu->securepagetable == NULL) {
1578 return -ENOMEM;
1579 }
1580 }
1581 iommu_pt = mmu->securepagetable->priv;
1582
1583 ret = _attach_pt(iommu_pt, ctx);
1584 if (ret)
1585 goto done;
1586
1587 ctx->default_pt = mmu->securepagetable;
1588
1589 ret = iommu_domain_get_attr(iommu_pt->domain, DOMAIN_ATTR_CONTEXT_BANK,
1590 &cb_num);
1591 if (ret) {
1592 KGSL_CORE_ERR("get CONTEXT_BANK attr, err %d\n", ret);
1593 goto done;
1594 }
1595 ctx->cb_num = cb_num;
1596done:
1597 if (ret)
1598 _detach_context(ctx);
1599 return ret;
1600}
1601
1602static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt);
1603
1604static int kgsl_iommu_start(struct kgsl_mmu *mmu)
1605{
1606 int status;
1607 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1608
1609 status = _setup_user_context(mmu);
1610 if (status)
1611 return status;
1612
1613 status = _setup_secure_context(mmu);
1614 if (status) {
1615 _detach_context(&iommu->ctx[KGSL_IOMMU_CONTEXT_USER]);
1616 return status;
1617 }
1618
1619 /* Make sure the hardware is programmed to the default pagetable */
1620 return kgsl_iommu_set_pt(mmu, mmu->defaultpagetable);
1621}
1622
1623static int
1624kgsl_iommu_unmap_offset(struct kgsl_pagetable *pt,
1625 struct kgsl_memdesc *memdesc, uint64_t addr,
1626 uint64_t offset, uint64_t size)
1627{
1628 if (size == 0 || (size + offset) > kgsl_memdesc_footprint(memdesc))
1629 return -EINVAL;
1630 /*
1631 * All GPU addresses as assigned are page aligned, but some
1632 * functions perturb the gpuaddr with an offset, so apply the
1633 * mask here to make sure we have the right address.
1634 */
1635
1636 addr = PAGE_ALIGN(addr);
1637 if (addr == 0)
1638 return -EINVAL;
1639
1640 return _iommu_unmap_sync_pc(pt, memdesc, addr + offset, size);
1641}
1642
1643static int
1644kgsl_iommu_unmap(struct kgsl_pagetable *pt, struct kgsl_memdesc *memdesc)
1645{
1646 if (memdesc->size == 0 || memdesc->gpuaddr == 0)
1647 return -EINVAL;
1648
1649 return kgsl_iommu_unmap_offset(pt, memdesc, memdesc->gpuaddr, 0,
1650 kgsl_memdesc_footprint(memdesc));
1651}
1652
1653/**
1654 * _iommu_map_guard_page - Map iommu guard page
1655 * @pt - Pointer to kgsl pagetable structure
1656 * @memdesc - memdesc to add guard page
1657 * @gpuaddr - GPU addr of guard page
1658 * @protflags - flags for mapping
1659 *
1660 * Return 0 on success, error on map fail
1661 */
1662static int _iommu_map_guard_page(struct kgsl_pagetable *pt,
1663 struct kgsl_memdesc *memdesc,
1664 uint64_t gpuaddr,
1665 unsigned int protflags)
1666{
1667 phys_addr_t physaddr;
1668
1669 if (!kgsl_memdesc_has_guard_page(memdesc))
1670 return 0;
1671
1672 /*
1673 * Allocate guard page for secure buffers.
1674 * This has to be done after we attach a smmu pagetable.
 1675 * Allocate the guard page when the first secure buffer is
1676 * mapped to save 1MB of memory if CPZ is not used.
1677 */
1678 if (kgsl_memdesc_is_secured(memdesc)) {
1679 struct scatterlist *sg;
1680 unsigned int sgp_size = pt->mmu->secure_align_mask + 1;
1681
1682 if (!kgsl_secure_guard_page_memdesc.sgt) {
1683 if (kgsl_allocate_user(KGSL_MMU_DEVICE(pt->mmu),
1684 &kgsl_secure_guard_page_memdesc,
1685 sgp_size, KGSL_MEMFLAGS_SECURE)) {
1686 KGSL_CORE_ERR(
1687 "Secure guard page alloc failed\n");
1688 return -ENOMEM;
1689 }
1690 }
1691
1692 sg = kgsl_secure_guard_page_memdesc.sgt->sgl;
1693 physaddr = page_to_phys(sg_page(sg));
1694 } else {
1695 if (kgsl_guard_page == NULL) {
1696 kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
1697 __GFP_NORETRY | __GFP_HIGHMEM);
1698 if (kgsl_guard_page == NULL)
1699 return -ENOMEM;
1700 }
1701
1702 physaddr = page_to_phys(kgsl_guard_page);
1703 }
1704
1705 return _iommu_map_sync_pc(pt, memdesc, gpuaddr, physaddr,
1706 kgsl_memdesc_guard_page_size(memdesc),
1707 protflags & ~IOMMU_WRITE);
1708}
1709
1710static unsigned int _get_protection_flags(struct kgsl_memdesc *memdesc)
1711{
1712 unsigned int flags = IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC;
1713
1714 if (memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY)
1715 flags &= ~IOMMU_WRITE;
1716
1717 if (memdesc->priv & KGSL_MEMDESC_PRIVILEGED)
1718 flags |= IOMMU_PRIV;
1719
1720 return flags;
1721}
1722
1723static int
1724kgsl_iommu_map(struct kgsl_pagetable *pt,
1725 struct kgsl_memdesc *memdesc)
1726{
1727 int ret;
1728 uint64_t addr = memdesc->gpuaddr;
1729 uint64_t size = memdesc->size;
1730 unsigned int flags = _get_protection_flags(memdesc);
1731 struct sg_table *sgt = NULL;
1732
1733 /*
1734 * For paged memory allocated through kgsl, memdesc->pages is not NULL.
1735 * Allocate sgt here just for its map operation. Contiguous memory
1736 * already has its sgt, so no need to allocate it here.
1737 */
1738 if (memdesc->pages != NULL)
1739 sgt = kgsl_alloc_sgt_from_pages(memdesc);
1740 else
1741 sgt = memdesc->sgt;
1742
1743 if (IS_ERR(sgt))
1744 return PTR_ERR(sgt);
1745
1746 ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt->sgl,
1747 sgt->nents, flags);
1748 if (ret)
1749 goto done;
1750
1751 ret = _iommu_map_guard_page(pt, memdesc, addr + size, flags);
1752 if (ret)
1753 _iommu_unmap_sync_pc(pt, memdesc, addr, size);
1754
1755done:
1756 if (memdesc->pages != NULL)
1757 kgsl_free_sgt(sgt);
1758
1759 return ret;
1760}
1761
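/*
 * Back an unbound sparse range with the shared dummy page, mapped read-only
 * and non-executable.
 */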
1762static int kgsl_iommu_sparse_dummy_map(struct kgsl_pagetable *pt,
1763 struct kgsl_memdesc *memdesc, uint64_t offset, uint64_t size)
1764{
1765 int ret = 0, i;
1766 struct page **pages = NULL;
1767 struct sg_table sgt;
1768 int count = size >> PAGE_SHIFT;
1769
1770 /* verify the offset is within our range */
1771 if (size + offset > memdesc->size)
1772 return -EINVAL;
1773
1774 if (kgsl_dummy_page == NULL) {
1775 kgsl_dummy_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
1776 __GFP_HIGHMEM);
1777 if (kgsl_dummy_page == NULL)
1778 return -ENOMEM;
1779 }
1780
1781 pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
1782 if (pages == NULL)
1783 return -ENOMEM;
1784
1785 for (i = 0; i < count; i++)
1786 pages[i] = kgsl_dummy_page;
1787
1788 ret = sg_alloc_table_from_pages(&sgt, pages, count,
1789 0, size, GFP_KERNEL);
1790 if (ret == 0) {
1791 ret = _iommu_map_sg_sync_pc(pt, memdesc->gpuaddr + offset,
1792 memdesc, sgt.sgl, sgt.nents,
1793 IOMMU_READ | IOMMU_NOEXEC);
1794 sg_free_table(&sgt);
1795 }
1796
1797 kfree(pages);
1798
1799 return ret;
1800}
1801
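/*
 * Map an entire virtual range onto the single physical page found at
 * physoffset (used for KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS bindings).
 */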
1802static int _map_to_one_page(struct kgsl_pagetable *pt, uint64_t addr,
1803 struct kgsl_memdesc *memdesc, uint64_t physoffset,
1804 uint64_t size, unsigned int map_flags)
1805{
1806 int ret = 0, i;
1807 int pg_sz = kgsl_memdesc_get_pagesize(memdesc);
1808 int count = size >> PAGE_SHIFT;
1809 struct page *page = NULL;
1810 struct page **pages = NULL;
1811 struct sg_page_iter sg_iter;
1812 struct sg_table sgt;
1813
1814 /* Find our physaddr offset addr */
1815 if (memdesc->pages != NULL)
1816 page = memdesc->pages[physoffset >> PAGE_SHIFT];
1817 else {
1818 for_each_sg_page(memdesc->sgt->sgl, &sg_iter,
1819 memdesc->sgt->nents, physoffset >> PAGE_SHIFT) {
1820 page = sg_page_iter_page(&sg_iter);
1821 break;
1822 }
1823 }
1824
1825 if (page == NULL)
1826 return -EINVAL;
1827
1828 pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
1829 if (pages == NULL)
1830 return -ENOMEM;
1831
1832 for (i = 0; i < count; i++) {
1833 if (pg_sz != PAGE_SIZE) {
1834 struct page *tmp_page = page;
1835 int j;
1836
1837 for (j = 0; j < 16; j++, tmp_page += PAGE_SIZE)
1838 pages[i++] = tmp_page;
1839 } else
1840 pages[i] = page;
1841 }
1842
1843 ret = sg_alloc_table_from_pages(&sgt, pages, count,
1844 0, size, GFP_KERNEL);
1845 if (ret == 0) {
1846 ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt.sgl,
1847 sgt.nents, map_flags);
1848 sg_free_table(&sgt);
1849 }
1850
1851 kfree(pages);
1852
1853 return ret;
1854}
1855
1856static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
1857 uint64_t virtaddr, uint64_t virtoffset,
1858 struct kgsl_memdesc *memdesc, uint64_t physoffset,
1859 uint64_t size, uint64_t feature_flag)
1860{
1861 int pg_sz;
1862 unsigned int protflags = _get_protection_flags(memdesc);
1863 int ret;
1864 struct sg_table *sgt = NULL;
1865
1866 pg_sz = kgsl_memdesc_get_pagesize(memdesc);
1867 if (!IS_ALIGNED(virtaddr | virtoffset | physoffset | size, pg_sz))
1868 return -EINVAL;
1869
1870 if (size == 0)
1871 return -EINVAL;
1872
1873 if (!(feature_flag & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS) &&
1874 size + physoffset > kgsl_memdesc_footprint(memdesc))
1875 return -EINVAL;
1876
1877 /*
1878 * For paged memory allocated through kgsl, memdesc->pages is not NULL.
1879 * Allocate sgt here just for its map operation. Contiguous memory
1880 * already has its sgt, so no need to allocate it here.
1881 */
1882 if (memdesc->pages != NULL)
1883 sgt = kgsl_alloc_sgt_from_pages(memdesc);
1884 else
1885 sgt = memdesc->sgt;
1886
1887 if (IS_ERR(sgt))
1888 return PTR_ERR(sgt);
1889
1890 if (feature_flag & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS)
1891 ret = _map_to_one_page(pt, virtaddr + virtoffset,
1892 memdesc, physoffset, size, protflags);
1893 else
1894 ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
1895 memdesc, sgt->sgl, sgt->nents,
1896 physoffset, size, protflags);
1897
1898 if (memdesc->pages != NULL)
1899 kgsl_free_sgt(sgt);
1900
1901 return ret;
1902}
1903
1904/* This function must be called with context bank attached */
1905static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
1906{
1907 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1908 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1909 unsigned int sctlr_val;
1910
1911 if (ctx->default_pt != NULL) {
1912 kgsl_iommu_enable_clk(mmu);
1913 KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff);
1914 /*
1915 * Re-enable context fault interrupts after clearing
1916 * FSR to prevent the interrupt from firing repeatedly
1917 */
1918 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
1919 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
1920 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
1921 /*
1922 * Make sure the above register writes
1923 * are not reordered across the barrier
1924 * as we use writel_relaxed to write them
1925 */
1926 wmb();
1927 kgsl_iommu_disable_clk(mmu);
1928 }
1929}
1930
1931static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
1932{
1933 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1934 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1935
1936 if (ctx->default_pt != NULL && ctx->fault) {
1937 /*
1938 * Write 1 to RESUME.TnR to terminate the
1939 * stalled transaction.
1940 */
1941 KGSL_IOMMU_SET_CTX_REG(ctx, RESUME, 1);
1942 /*
1943 * Make sure the above register writes
1944 * are not reordered across the barrier
1945 * as we use writel_relaxed to write them
1946 */
1947 wmb();
1948 ctx->fault = 0;
1949 }
1950}
1951
1952static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
1953{
1954 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1955 int i;
1956
1957 /*
1958 * If the iommu supports retention, we don't need
1959 * to detach when stopping.
1960 */
1961 if (!MMU_FEATURE(mmu, KGSL_MMU_RETENTION)) {
1962 for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
1963 _detach_context(&iommu->ctx[i]);
1964 }
1965}
1966
1967static u64
1968kgsl_iommu_get_current_ttbr0(struct kgsl_mmu *mmu)
1969{
1970 u64 val;
1971 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1972 /*
1973 * We cannot enable or disable the clocks in interrupt context, this
1974 * function is called from interrupt context if there is an axi error
1975 */
1976 if (in_interrupt())
1977 return 0;
1978
1979 kgsl_iommu_enable_clk(mmu);
1980 val = KGSL_IOMMU_GET_CTX_REG_Q(&iommu->ctx[KGSL_IOMMU_CONTEXT_USER],
1981 TTBR0);
1982 kgsl_iommu_disable_clk(mmu);
1983 return val;
1984}
1985
1986/*
1987 * kgsl_iommu_set_pt - Change the IOMMU pagetable of the primary context bank
1988 * @mmu - Pointer to mmu structure
1989 * @pt - Pagetable to switch to
1990 *
1991 * Set the new pagetable for the IOMMU by doing direct register writes
1992 * to the IOMMU registers through the cpu
1993 *
1994 * Return - void
1995 */
1996static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1997{
1998 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1999 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2000 uint64_t ttbr0, temp;
2001 unsigned int contextidr;
2002 unsigned long wait_for_flush;
2003
2004 if ((pt != mmu->defaultpagetable) && !kgsl_mmu_is_perprocess(mmu))
2005 return 0;
2006
2007 kgsl_iommu_enable_clk(mmu);
2008
2009 ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pt);
2010 contextidr = kgsl_mmu_pagetable_get_contextidr(pt);
2011
2012 KGSL_IOMMU_SET_CTX_REG_Q(ctx, TTBR0, ttbr0);
2013 KGSL_IOMMU_SET_CTX_REG(ctx, CONTEXTIDR, contextidr);
2014
2015 /* memory barrier before reading TTBR0 register */
2016 mb();
2017 temp = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
2018
2019 KGSL_IOMMU_SET_CTX_REG(ctx, TLBIALL, 1);
2020	/* make sure the TLBIALL write completes before we wait */
2021 mb();
2022 /*
2023 * Wait for flush to complete by polling the flush
2024	 * status bit of the TLBSTATUS register for not more than
2025	 * 2 seconds. After 2 seconds just exit; at that point the
2026	 * SMMU h/w may be stuck and will eventually cause the GPU
2027	 * to hang or bring the system down.
2028 */
2029 wait_for_flush = jiffies + msecs_to_jiffies(2000);
2030 KGSL_IOMMU_SET_CTX_REG(ctx, TLBSYNC, 0);
2031 while (KGSL_IOMMU_GET_CTX_REG(ctx, TLBSTATUS) &
2032 (KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE)) {
2033 if (time_after(jiffies, wait_for_flush)) {
2034 KGSL_DRV_WARN(KGSL_MMU_DEVICE(mmu),
2035 "Wait limit reached for IOMMU tlb flush\n");
2036 break;
2037 }
2038 cpu_relax();
2039 }
2040
2041 kgsl_iommu_disable_clk(mmu);
2042 return 0;
2043}
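/*
 * Note: kgsl_iommu_set_pt() is exported through the .mmu_set_pt hook in
 * kgsl_iommu_ops at the bottom of this file. An illustrative sketch of the
 * dispatch (assuming the usual kgsl_mmu_ops indirection, not a verbatim
 * copy of the core code):
 *
 *	if (mmu->mmu_ops && mmu->mmu_ops->mmu_set_pt)
 *		ret = mmu->mmu_ops->mmu_set_pt(mmu, pagetable);
 */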
2044
2045/*
2046 * kgsl_iommu_set_pf_policy() - Set the pagefault policy for IOMMU
2047 * @mmu: Pointer to mmu structure
2048 * @pf_policy: The pagefault policy to set
2049 *
2050 * Check if the new policy indicated by pf_policy is the same as the
2051 * current policy; if it is, return early, otherwise program the new policy
2052 */
2053static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
2054 unsigned long pf_policy)
2055{
2056 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2057 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2058 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
2059 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2060
2061 if ((adreno_dev->ft_pf_policy &
2062 BIT(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)) ==
2063 (pf_policy & BIT(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)))
2064 return 0;
2065
2066 /* If not attached, policy will be updated during the next attach */
2067 if (ctx->default_pt != NULL) {
2068 unsigned int sctlr_val;
2069
2070 kgsl_iommu_enable_clk(mmu);
2071
2072 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
2073
2074 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, &pf_policy)) {
2075 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
2076 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
2077 } else {
2078 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
2079 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
2080 }
2081
2082 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
2083
2084 kgsl_iommu_disable_clk(mmu);
2085 }
2086
2087 return 0;
2088}
2089
2090static struct kgsl_protected_registers *
2091kgsl_iommu_get_prot_regs(struct kgsl_mmu *mmu)
2092{
2093 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2094
2095 return &iommu->protect;
2096}
2097
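/*
 * _find_gpuaddr - Find the rbtree entry whose base exactly matches @gpuaddr.
 * Returns NULL if no entry starts at that address; callers are expected to
 * hold the pagetable spinlock.
 */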
2098static struct kgsl_iommu_addr_entry *_find_gpuaddr(
2099 struct kgsl_pagetable *pagetable, uint64_t gpuaddr)
2100{
2101 struct kgsl_iommu_pt *pt = pagetable->priv;
2102 struct rb_node *node = pt->rbtree.rb_node;
2103
2104 while (node != NULL) {
2105 struct kgsl_iommu_addr_entry *entry = rb_entry(node,
2106 struct kgsl_iommu_addr_entry, node);
2107
2108 if (gpuaddr < entry->base)
2109 node = node->rb_left;
2110 else if (gpuaddr > entry->base)
2111 node = node->rb_right;
2112 else
2113 return entry;
2114 }
2115
2116 return NULL;
2117}
2118
2119static int _remove_gpuaddr(struct kgsl_pagetable *pagetable,
2120 uint64_t gpuaddr)
2121{
2122 struct kgsl_iommu_pt *pt = pagetable->priv;
2123 struct kgsl_iommu_addr_entry *entry;
2124
2125 entry = _find_gpuaddr(pagetable, gpuaddr);
2126
2127 if (entry != NULL) {
2128 rb_erase(&entry->node, &pt->rbtree);
2129 kmem_cache_free(addr_entry_cache, entry);
2130 return 0;
2131 }
2132
2133 WARN(1, "Couldn't remove gpuaddr: 0x%llx\n", gpuaddr);
2134 return -ENOMEM;
2135}
2136
2137static int _insert_gpuaddr(struct kgsl_pagetable *pagetable,
2138 uint64_t gpuaddr, uint64_t size)
2139{
2140 struct kgsl_iommu_pt *pt = pagetable->priv;
2141 struct rb_node **node, *parent = NULL;
2142 struct kgsl_iommu_addr_entry *new =
2143 kmem_cache_alloc(addr_entry_cache, GFP_ATOMIC);
2144
2145 if (new == NULL)
2146 return -ENOMEM;
2147
2148 new->base = gpuaddr;
2149 new->size = size;
2150
2151 node = &pt->rbtree.rb_node;
2152
2153 while (*node != NULL) {
2154 struct kgsl_iommu_addr_entry *this;
2155
2156 parent = *node;
2157 this = rb_entry(parent, struct kgsl_iommu_addr_entry, node);
2158
2159 if (new->base < this->base)
2160 node = &parent->rb_left;
2161 else if (new->base > this->base)
2162 node = &parent->rb_right;
2163		else {
2164			/* Duplicate entry - free the unused node before failing */
2165			WARN(1, "duplicate gpuaddr: 0x%llx\n", gpuaddr);
			kmem_cache_free(addr_entry_cache, new);
2166			return -EEXIST;
2167		}
2168 }
2169
2170 rb_link_node(&new->node, parent, node);
2171 rb_insert_color(&new->node, &pt->rbtree);
2172
2173 return 0;
2174}
2175
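/*
 * _get_unmapped_area - Walk the sorted rbtree bottom-up looking for the
 * first gap in [@bottom, @top) that can hold @size bytes at @align
 * alignment. Returns the start of the gap or (uint64_t) -ENOMEM.
 */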
2176static uint64_t _get_unmapped_area(struct kgsl_pagetable *pagetable,
2177 uint64_t bottom, uint64_t top, uint64_t size,
2178 uint64_t align)
2179{
2180 struct kgsl_iommu_pt *pt = pagetable->priv;
2181 struct rb_node *node = rb_first(&pt->rbtree);
2182 uint64_t start;
2183
2184 bottom = ALIGN(bottom, align);
2185 start = bottom;
2186
2187 while (node != NULL) {
2188 uint64_t gap;
2189 struct kgsl_iommu_addr_entry *entry = rb_entry(node,
2190 struct kgsl_iommu_addr_entry, node);
2191
2192 /*
2193 * Skip any entries that are outside of the range, but make sure
2194 * to account for some that might straddle the lower bound
2195 */
2196 if (entry->base < bottom) {
2197 if (entry->base + entry->size > bottom)
2198 start = ALIGN(entry->base + entry->size, align);
2199 node = rb_next(node);
2200 continue;
2201 }
2202
2203 /* Stop if we went over the top */
2204 if (entry->base >= top)
2205 break;
2206
2207 /* Make sure there is a gap to consider */
2208 if (start < entry->base) {
2209 gap = entry->base - start;
2210
2211 if (gap >= size)
2212 return start;
2213 }
2214
2215 /* Stop if there is no more room in the region */
2216 if (entry->base + entry->size >= top)
2217 return (uint64_t) -ENOMEM;
2218
2219 /* Start the next cycle at the end of the current entry */
2220 start = ALIGN(entry->base + entry->size, align);
2221 node = rb_next(node);
2222 }
2223
2224 if (start + size <= top)
2225 return start;
2226
2227 return (uint64_t) -ENOMEM;
2228}
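/*
 * Illustrative example (hypothetical numbers): with existing entries at
 * 0x1000-0x2000 and 0x4000-0x5000 in a 0x0-0x8000 range, a bottom-up
 * request for 0x1000 bytes with 4K alignment returns 0x0, the lowest
 * aligned gap that is large enough. kgsl_iommu_get_gpuaddr() below pairs
 * this search with _insert_gpuaddr() roughly as follows:
 *
 *	addr = _get_unmapped_area(pagetable, start, end, size, align);
 *	if (addr != (uint64_t) -ENOMEM)
 *		ret = _insert_gpuaddr(pagetable, addr, size);
 */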
2229
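/*
 * _get_unmapped_area_topdown - Like _get_unmapped_area(), but walk the
 * rbtree from the highest entry downwards so that allocations are packed
 * toward the top of [@bottom, @top).
 */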
2230static uint64_t _get_unmapped_area_topdown(struct kgsl_pagetable *pagetable,
2231 uint64_t bottom, uint64_t top, uint64_t size,
2232 uint64_t align)
2233{
2234 struct kgsl_iommu_pt *pt = pagetable->priv;
2235 struct rb_node *node = rb_last(&pt->rbtree);
2236 uint64_t end = top;
2237 uint64_t mask = ~(align - 1);
2238 struct kgsl_iommu_addr_entry *entry;
2239
2240 /* Make sure that the bottom is correctly aligned */
2241 bottom = ALIGN(bottom, align);
2242
2243 /* Make sure the requested size will fit in the range */
2244 if (size > (top - bottom))
2245		return (uint64_t) -ENOMEM;
2246
2247 /* Walk back through the list to find the highest entry in the range */
2248 for (node = rb_last(&pt->rbtree); node != NULL; node = rb_prev(node)) {
2249 entry = rb_entry(node, struct kgsl_iommu_addr_entry, node);
2250 if (entry->base < top)
2251 break;
2252 }
2253
2254 while (node != NULL) {
2255 uint64_t offset;
2256
2257 entry = rb_entry(node, struct kgsl_iommu_addr_entry, node);
2258
2259 /* If the entire entry is below the range the search is over */
2260 if ((entry->base + entry->size) < bottom)
2261 break;
2262
2263 /* Get the top of the entry properly aligned */
2264 offset = ALIGN(entry->base + entry->size, align);
2265
2266 /*
2267 * Try to allocate the memory from the top of the gap,
2268 * making sure that it fits between the top of this entry and
2269 * the bottom of the previous one
2270 */
2271
2272 if ((end > size) && (offset < end)) {
2273 uint64_t chunk = (end - size) & mask;
2274
2275 if (chunk >= offset)
2276 return chunk;
2277 }
2278
2279 /*
2280 * If we get here and the current entry is outside of the range
2281 * then we are officially out of room
2282 */
2283
2284 if (entry->base < bottom)
2285 return (uint64_t) -ENOMEM;
2286
2287 /* Set the top of the gap to the current entry->base */
2288 end = entry->base;
2289
2290 /* And move on to the next lower entry */
2291 node = rb_prev(node);
2292 }
2293
2294 /* If we get here then there are no more entries in the region */
2295 if ((end > size) && (((end - size) & mask) >= bottom))
2296 return (end - size) & mask;
2297
2298 return (uint64_t) -ENOMEM;
2299}
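/*
 * Illustrative example (hypothetical numbers): with the same entries at
 * 0x1000-0x2000 and 0x4000-0x5000 in a 0x0-0x8000 range, a top-down
 * request for 0x1000 bytes with 4K alignment first tries the gap above
 * 0x5000 and returns 0x7000; the 0x3000 gap between the two entries is
 * only used if the upper gap is too small.
 */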
2300
2301static uint64_t kgsl_iommu_find_svm_region(struct kgsl_pagetable *pagetable,
2302 uint64_t start, uint64_t end, uint64_t size,
2303 uint64_t alignment)
2304{
2305 uint64_t addr;
2306
2307 /* Avoid black holes */
2308 if (WARN(end <= start, "Bad search range: 0x%llx-0x%llx", start, end))
2309 return (uint64_t) -EINVAL;
2310
2311 spin_lock(&pagetable->lock);
2312 addr = _get_unmapped_area_topdown(pagetable,
2313 start, end, size, alignment);
2314 spin_unlock(&pagetable->lock);
2315 return addr;
2316}
2317
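/*
 * kgsl_iommu_set_svm_region - Reserve a caller-chosen SVM address range.
 * Fails with -ENOMEM if the range touches the global region or overlaps
 * any entry already in the pagetable rbtree.
 */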
2318static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
2319 uint64_t gpuaddr, uint64_t size)
2320{
2321 int ret = -ENOMEM;
2322 struct kgsl_iommu_pt *pt = pagetable->priv;
2323 struct rb_node *node;
2324
2325 /* Make sure the requested address doesn't fall in the global range */
2326 if (ADDR_IN_GLOBAL(gpuaddr) || ADDR_IN_GLOBAL(gpuaddr + size))
2327 return -ENOMEM;
2328
2329 spin_lock(&pagetable->lock);
2330 node = pt->rbtree.rb_node;
2331
2332 while (node != NULL) {
2333 uint64_t start, end;
2334 struct kgsl_iommu_addr_entry *entry = rb_entry(node,
2335 struct kgsl_iommu_addr_entry, node);
2336
2337 start = entry->base;
2338 end = entry->base + entry->size;
2339
2340 if (gpuaddr + size <= start)
2341 node = node->rb_left;
2342 else if (end <= gpuaddr)
2343 node = node->rb_right;
2344 else
2345 goto out;
2346 }
2347
2348 ret = _insert_gpuaddr(pagetable, gpuaddr, size);
2349out:
2350 spin_unlock(&pagetable->lock);
2351 return ret;
2352}
2353
2355static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
2356 struct kgsl_memdesc *memdesc)
2357{
2358 struct kgsl_iommu_pt *pt = pagetable->priv;
2359 int ret = 0;
2360 uint64_t addr, start, end, size;
2361 unsigned int align;
2362
2363 if (WARN_ON(kgsl_memdesc_use_cpu_map(memdesc)))
2364 return -EINVAL;
2365
2366 if (memdesc->flags & KGSL_MEMFLAGS_SECURE &&
2367 pagetable->name != KGSL_MMU_SECURE_PT)
2368 return -EINVAL;
2369
2370 size = kgsl_memdesc_footprint(memdesc);
2371
2372 align = 1 << kgsl_memdesc_get_align(memdesc);
2373
2374 if (memdesc->flags & KGSL_MEMFLAGS_FORCE_32BIT) {
2375 start = pt->compat_va_start;
2376 end = pt->compat_va_end;
2377 } else {
2378 start = pt->va_start;
2379 end = pt->va_end;
2380 }
2381
2382 spin_lock(&pagetable->lock);
2383
2384 addr = _get_unmapped_area(pagetable, start, end, size, align);
2385
2386 if (addr == (uint64_t) -ENOMEM) {
2387 ret = -ENOMEM;
2388 goto out;
2389 }
2390
2391 ret = _insert_gpuaddr(pagetable, addr, size);
2392 if (ret == 0) {
2393 memdesc->gpuaddr = addr;
2394 memdesc->pagetable = pagetable;
2395 }
2396
2397out:
2398 spin_unlock(&pagetable->lock);
2399 return ret;
2400}
2401
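/*
 * kgsl_iommu_put_gpuaddr - Release the GPU virtual address assigned to
 * @memdesc by removing its entry from the pagetable rbtree.
 */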
2402static void kgsl_iommu_put_gpuaddr(struct kgsl_memdesc *memdesc)
2403{
2404 if (memdesc->pagetable == NULL)
2405 return;
2406
2407 spin_lock(&memdesc->pagetable->lock);
2408
2409 _remove_gpuaddr(memdesc->pagetable, memdesc->gpuaddr);
2410
2411 spin_unlock(&memdesc->pagetable->lock);
2412}
2413
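/*
 * kgsl_iommu_svm_range - Report the SVM virtual address range for the
 * pagetable, using the 32 bit compat window when KGSL_MEMFLAGS_FORCE_32BIT
 * is set in @memflags.
 */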
2414static int kgsl_iommu_svm_range(struct kgsl_pagetable *pagetable,
2415 uint64_t *lo, uint64_t *hi, uint64_t memflags)
2416{
2417 struct kgsl_iommu_pt *pt = pagetable->priv;
2418 bool gpu_compat = (memflags & KGSL_MEMFLAGS_FORCE_32BIT) != 0;
2419
2420 if (lo != NULL)
2421 *lo = gpu_compat ? pt->compat_va_start : pt->svm_start;
2422 if (hi != NULL)
2423 *hi = gpu_compat ? pt->compat_va_end : pt->svm_end;
2424
2425 return 0;
2426}
2427
2428static bool kgsl_iommu_addr_in_range(struct kgsl_pagetable *pagetable,
2429 uint64_t gpuaddr)
2430{
2431 struct kgsl_iommu_pt *pt = pagetable->priv;
2432
2433 if (gpuaddr == 0)
2434 return false;
2435
2436 if (gpuaddr >= pt->va_start && gpuaddr < pt->va_end)
2437 return true;
2438
2439 if (gpuaddr >= pt->compat_va_start && gpuaddr < pt->compat_va_end)
2440 return true;
2441
2442 if (gpuaddr >= pt->svm_start && gpuaddr < pt->svm_end)
2443 return true;
2444
2445 return false;
2446}
2447
2448static const struct {
2449 int id;
2450 char *name;
2451} kgsl_iommu_cbs[] = {
2452 { KGSL_IOMMU_CONTEXT_USER, "gfx3d_user", },
2453 { KGSL_IOMMU_CONTEXT_SECURE, "gfx3d_secure" },
2454};
2455
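/*
 * _kgsl_iommu_cb_probe - Match a context bank device tree node against the
 * names in kgsl_iommu_cbs[] above and fill in the corresponding
 * kgsl_iommu_context, including its struct device pointer.
 */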
2456static int _kgsl_iommu_cb_probe(struct kgsl_device *device,
2457 struct kgsl_iommu *iommu, struct device_node *node)
2458{
2459 struct platform_device *pdev = of_find_device_by_node(node);
2460 struct kgsl_iommu_context *ctx = NULL;
2461 int i;
2462
2463 for (i = 0; i < ARRAY_SIZE(kgsl_iommu_cbs); i++) {
2464 if (!strcmp(node->name, kgsl_iommu_cbs[i].name)) {
2465 int id = kgsl_iommu_cbs[i].id;
2466
2467 ctx = &iommu->ctx[id];
2468 ctx->id = id;
2469 ctx->cb_num = -1;
2470 ctx->name = kgsl_iommu_cbs[i].name;
2471
2472 break;
2473 }
2474 }
2475
2476 if (ctx == NULL) {
2477 KGSL_CORE_ERR("dt: Unknown context label %s\n", node->name);
2478 return -EINVAL;
2479 }
2480
2481 if (ctx->id == KGSL_IOMMU_CONTEXT_SECURE)
2482 device->mmu.secured = true;
2483
2484 /* this property won't be found for all context banks */
2485 if (of_property_read_u32(node, "qcom,gpu-offset", &ctx->gpu_offset))
2486 ctx->gpu_offset = UINT_MAX;
2487
2488 ctx->kgsldev = device;
2489
2490	/* With the arm-smmu driver we'll have the right device pointer here */
2491 if (of_find_property(node, "iommus", NULL)) {
2492 ctx->dev = &pdev->dev;
2493 } else {
2494 ctx->dev = kgsl_mmu_get_ctx(ctx->name);
2495
2496 if (IS_ERR(ctx->dev))
2497 return PTR_ERR(ctx->dev);
2498 }
2499
2500 return 0;
2501}
2502
2503static const struct {
2504 char *feature;
2505 int bit;
2506} kgsl_iommu_features[] = {
2507 { "qcom,retention", KGSL_MMU_RETENTION },
2508 { "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
2509 { "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC },
2510 { "qcom,force-32bit", KGSL_MMU_FORCE_32BIT },
2511};
2512
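/*
 * _kgsl_iommu_probe() parses an IOMMU node shaped roughly like the
 * following (illustrative fragment only; the exact addresses, sizes and
 * clock names are target specific):
 *
 *	kgsl_smmu: qcom,kgsl-iommu {
 *		compatible = "qcom,kgsl-smmu-v2";
 *		reg = <0x40000 0x10000>;
 *		qcom,protect = <0x40000 0x10000>;
 *		clock-names = "iface_clk";
 *		qcom,retention;
 *		gfx3d_user: gfx3d_user {
 *			compatible = "qcom,smmu-kgsl-cb";
 *			iommus = <&kgsl_smmu 0>;
 *		};
 *	};
 */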
2513static int _kgsl_iommu_probe(struct kgsl_device *device,
2514 struct device_node *node)
2515{
2516 const char *cname;
2517 struct property *prop;
2518 u32 reg_val[2];
2519 int i = 0;
2520 struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
2521 struct device_node *child;
2522 struct platform_device *pdev = of_find_device_by_node(node);
2523
2524 memset(iommu, 0, sizeof(*iommu));
2525
2526 if (of_device_is_compatible(node, "qcom,kgsl-smmu-v1"))
2527 iommu->version = 1;
2528 else
2529 iommu->version = 2;
2530
2531 if (of_property_read_u32_array(node, "reg", reg_val, 2)) {
2532 KGSL_CORE_ERR("dt: Unable to read KGSL IOMMU register range\n");
2533 return -EINVAL;
2534 }
2535 iommu->regstart = reg_val[0];
2536 iommu->regsize = reg_val[1];
2537
2538 /* Protecting the SMMU registers is mandatory */
2539 if (of_property_read_u32_array(node, "qcom,protect", reg_val, 2)) {
2540 KGSL_CORE_ERR("dt: no iommu protection range specified\n");
2541 return -EINVAL;
2542 }
2543 iommu->protect.base = reg_val[0] / sizeof(u32);
2544 iommu->protect.range = ilog2(reg_val[1] / sizeof(u32));
2545
2546 of_property_for_each_string(node, "clock-names", prop, cname) {
2547 struct clk *c = devm_clk_get(&pdev->dev, cname);
2548
2549 if (IS_ERR(c)) {
2550 KGSL_CORE_ERR("dt: Couldn't get clock: %s\n", cname);
2551 return -ENODEV;
2552 }
2553 if (i >= KGSL_IOMMU_MAX_CLKS) {
2554 KGSL_CORE_ERR("dt: too many clocks defined.\n");
2555 return -EINVAL;
2556 }
2557
2558 iommu->clks[i] = c;
2559 ++i;
2560 }
2561
2562 for (i = 0; i < ARRAY_SIZE(kgsl_iommu_features); i++) {
2563 if (of_property_read_bool(node, kgsl_iommu_features[i].feature))
2564 device->mmu.features |= kgsl_iommu_features[i].bit;
2565 }
2566
2567 if (of_property_read_u32(node, "qcom,micro-mmu-control",
2568 &iommu->micro_mmu_ctrl))
2569 iommu->micro_mmu_ctrl = UINT_MAX;
2570
2571 if (of_property_read_u32(node, "qcom,secure_align_mask",
2572 &device->mmu.secure_align_mask))
2573 device->mmu.secure_align_mask = 0xfff;
2574
2575 /* Fill out the rest of the devices in the node */
2576 of_platform_populate(node, NULL, NULL, &pdev->dev);
2577
2578 for_each_child_of_node(node, child) {
2579 int ret;
2580
2581 if (!of_device_is_compatible(child, "qcom,smmu-kgsl-cb"))
2582 continue;
2583
2584 ret = _kgsl_iommu_cb_probe(device, iommu, child);
2585 if (ret)
2586 return ret;
2587 }
2588
2589 return 0;
2590}
2591
2592static const struct {
2593 char *compat;
2594 int (*probe)(struct kgsl_device *device, struct device_node *node);
2595} kgsl_dt_devices[] = {
2596 { "qcom,kgsl-smmu-v1", _kgsl_iommu_probe },
2597 { "qcom,kgsl-smmu-v2", _kgsl_iommu_probe },
2598};
2599
2600static int kgsl_iommu_probe(struct kgsl_device *device)
2601{
2602 int i;
2603
2604 for (i = 0; i < ARRAY_SIZE(kgsl_dt_devices); i++) {
2605 struct device_node *node;
2606
2607 node = of_find_compatible_node(device->pdev->dev.of_node,
2608 NULL, kgsl_dt_devices[i].compat);
2609
2610 if (node != NULL)
2611 return kgsl_dt_devices[i].probe(device, node);
2612 }
2613
2614 return -ENODEV;
2615}
2616
2617struct kgsl_mmu_ops kgsl_iommu_ops = {
2618 .mmu_init = kgsl_iommu_init,
2619 .mmu_close = kgsl_iommu_close,
2620 .mmu_start = kgsl_iommu_start,
2621 .mmu_stop = kgsl_iommu_stop,
2622 .mmu_set_pt = kgsl_iommu_set_pt,
2623 .mmu_clear_fsr = kgsl_iommu_clear_fsr,
2624 .mmu_get_current_ttbr0 = kgsl_iommu_get_current_ttbr0,
2625 .mmu_enable_clk = kgsl_iommu_enable_clk,
2626 .mmu_disable_clk = kgsl_iommu_disable_clk,
2627 .mmu_get_reg_ahbaddr = kgsl_iommu_get_reg_ahbaddr,
2628 .mmu_pt_equal = kgsl_iommu_pt_equal,
2629 .mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
2630 .mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
2631 .mmu_get_prot_regs = kgsl_iommu_get_prot_regs,
2632 .mmu_init_pt = kgsl_iommu_init_pt,
2633 .mmu_add_global = kgsl_iommu_add_global,
2634 .mmu_remove_global = kgsl_iommu_remove_global,
2635 .mmu_getpagetable = kgsl_iommu_getpagetable,
2636 .mmu_get_qdss_global_entry = kgsl_iommu_get_qdss_global_entry,
2637 .probe = kgsl_iommu_probe,
2638};
2639
2640static struct kgsl_mmu_pt_ops iommu_pt_ops = {
2641 .mmu_map = kgsl_iommu_map,
2642 .mmu_unmap = kgsl_iommu_unmap,
2643 .mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
2644 .get_ttbr0 = kgsl_iommu_get_ttbr0,
2645 .get_contextidr = kgsl_iommu_get_contextidr,
2646 .get_gpuaddr = kgsl_iommu_get_gpuaddr,
2647 .put_gpuaddr = kgsl_iommu_put_gpuaddr,
2648 .set_svm_region = kgsl_iommu_set_svm_region,
2649 .find_svm_region = kgsl_iommu_find_svm_region,
2650 .svm_range = kgsl_iommu_svm_range,
2651 .addr_in_range = kgsl_iommu_addr_in_range,
2652 .mmu_map_offset = kgsl_iommu_map_offset,
2653 .mmu_unmap_offset = kgsl_iommu_unmap_offset,
2654 .mmu_sparse_dummy_map = kgsl_iommu_sparse_dummy_map,
2655};