/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/export.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);

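/*
 * kref release callback: runs when the last reference to a pagetable is
 * dropped. It detaches the pagetable from the global list and sysfs, lets
 * the MMU backend free its hardware page tables, then frees the structure.
 */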
static void kgsl_destroy_pagetable(struct kref *kref)
{
        struct kgsl_pagetable *pagetable = container_of(kref,
                        struct kgsl_pagetable, refcount);

        kgsl_mmu_detach_pagetable(pagetable);

        if (PT_OP_VALID(pagetable, mmu_destroy_pagetable))
                pagetable->pt_ops->mmu_destroy_pagetable(pagetable);

        kfree(pagetable);
}

static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
        if (pagetable)
                kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}

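/**
 * kgsl_get_pagetable() - Look up a pagetable by name
 * @name: Name (id) of the pagetable to find
 *
 * Search the global pagetable list for @name and return it with an extra
 * reference held, or NULL if no live pagetable matches. The caller must
 * drop the reference with kgsl_put_pagetable().
 */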
struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
        struct kgsl_pagetable *pt, *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&kgsl_driver.ptlock, flags);
        list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
                if (name == pt->name && kref_get_unless_zero(&pt->refcount)) {
                        ret = pt;
                        break;
                }
        }

        spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
        return ret;
}

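/*
 * Each pagetable gets a sysfs directory named after its pagetable id, so
 * the kobject name can be parsed back into a name and used to look up
 * (and reference) the pagetable for the show functions below.
 */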
static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
        unsigned int ptname;

        if (!kobj)
                return NULL;

        if (kstrtou32(kobj->name, 0, &ptname))
                return NULL;

        return kgsl_get_pagetable(ptname);
}

static ssize_t
sysfs_show_entries(struct kobject *kobj,
                struct kobj_attribute *attr,
                char *buf)
{
        struct kgsl_pagetable *pt;
        int ret = 0;

        pt = _get_pt_from_kobj(kobj);

        if (pt) {
                unsigned int val = atomic_read(&pt->stats.entries);

                ret += snprintf(buf, PAGE_SIZE, "%d\n", val);
        }

        kgsl_put_pagetable(pt);
        return ret;
}

static ssize_t
sysfs_show_mapped(struct kobject *kobj,
                struct kobj_attribute *attr,
                char *buf)
{
        struct kgsl_pagetable *pt;
        int ret = 0;

        pt = _get_pt_from_kobj(kobj);

        if (pt) {
                uint64_t val = atomic_long_read(&pt->stats.mapped);

                ret += snprintf(buf, PAGE_SIZE, "%llu\n", val);
        }

        kgsl_put_pagetable(pt);
        return ret;
}

static ssize_t
sysfs_show_max_mapped(struct kobject *kobj,
                struct kobj_attribute *attr,
                char *buf)
{
        struct kgsl_pagetable *pt;
        int ret = 0;

        pt = _get_pt_from_kobj(kobj);

        if (pt) {
                uint64_t val = atomic_long_read(&pt->stats.max_mapped);

                ret += snprintf(buf, PAGE_SIZE, "%llu\n", val);
        }

        kgsl_put_pagetable(pt);
        return ret;
}

static struct kobj_attribute attr_entries = {
        .attr = { .name = "entries", .mode = 0444 },
        .show = sysfs_show_entries,
        .store = NULL,
};

static struct kobj_attribute attr_mapped = {
        .attr = { .name = "mapped", .mode = 0444 },
        .show = sysfs_show_mapped,
        .store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
        .attr = { .name = "max_mapped", .mode = 0444 },
        .show = sysfs_show_max_mapped,
        .store = NULL,
};

static struct attribute *pagetable_attrs[] = {
        &attr_entries.attr,
        &attr_mapped.attr,
        &attr_max_mapped.attr,
        NULL,
};

static struct attribute_group pagetable_attr_group = {
        .attrs = pagetable_attrs,
};

static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
        if (pagetable->kobj)
                sysfs_remove_group(pagetable->kobj,
                                &pagetable_attr_group);

        kobject_put(pagetable->kobj);
        pagetable->kobj = NULL;
}

static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
        char ptname[16];
        int ret = -ENOMEM;

        snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
        pagetable->kobj = kobject_create_and_add(ptname,
                        kgsl_driver.ptkobj);
        if (pagetable->kobj == NULL)
                goto err;

        ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
        if (ret) {
                if (pagetable->kobj)
                        kobject_put(pagetable->kobj);

                pagetable->kobj = NULL;
        }

        return ret;
}

void
kgsl_mmu_detach_pagetable(struct kgsl_pagetable *pagetable)
{
        unsigned long flags;

        spin_lock_irqsave(&kgsl_driver.ptlock, flags);

        if (!list_empty(&pagetable->list))
                list_del_init(&pagetable->list);

        spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

        pagetable_remove_sysfs_objects(pagetable);
}

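/**
 * kgsl_mmu_get_pt_from_ptname() - Find a pagetable by name without
 * taking a reference
 * @mmu: MMU owning the pagetable list
 * @ptname: Name (id) of the pagetable to find
 *
 * Unlike kgsl_get_pagetable(), this does not increment the refcount of
 * the returned pagetable, so the caller must ensure it stays alive.
 */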
struct kgsl_pagetable *kgsl_mmu_get_pt_from_ptname(struct kgsl_mmu *mmu,
                int ptname)
{
        struct kgsl_pagetable *pt;

        spin_lock(&kgsl_driver.ptlock);
        list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
                if (pt->name == ptname) {
                        spin_unlock(&kgsl_driver.ptlock);
                        return pt;
                }
        }
        spin_unlock(&kgsl_driver.ptlock);
        return NULL;
}
EXPORT_SYMBOL(kgsl_mmu_get_pt_from_ptname);

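/**
 * kgsl_mmu_log_fault_addr() - Check whether a fault address was already seen
 * @mmu: MMU that took the fault
 * @pt_base: Base address of the faulting pagetable
 * @addr: Faulting GPU address
 *
 * Return 1 if the page containing @addr is already recorded as the last
 * fault address for the matching pagetable (so the caller can skip logging
 * a duplicate); otherwise record it and return 0.
 */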
unsigned int
kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, u64 pt_base,
                uint64_t addr)
{
        struct kgsl_pagetable *pt;
        unsigned int ret = 0;

        if (!MMU_OP_VALID(mmu, mmu_pt_equal))
                return 0;

        spin_lock(&kgsl_driver.ptlock);
        list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
                if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
                        if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
                                ret = 1;
                                break;
                        }
                        pt->fault_addr = (addr & ~(PAGE_SIZE-1));
                        ret = 0;
                        break;
                }
        }
        spin_unlock(&kgsl_driver.ptlock);

        return ret;
}
EXPORT_SYMBOL(kgsl_mmu_log_fault_addr);

int kgsl_mmu_init(struct kgsl_device *device)
{
        struct kgsl_mmu *mmu = &device->mmu;

        if (MMU_OP_VALID(mmu, mmu_init))
                return mmu->mmu_ops->mmu_init(mmu);

        return 0;
}
EXPORT_SYMBOL(kgsl_mmu_init);

int kgsl_mmu_start(struct kgsl_device *device)
{
        struct kgsl_mmu *mmu = &device->mmu;

        if (MMU_OP_VALID(mmu, mmu_start))
                return mmu->mmu_ops->mmu_start(mmu);

        return 0;
}
EXPORT_SYMBOL(kgsl_mmu_start);

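/**
 * kgsl_mmu_createpagetableobject() - Allocate and register a new pagetable
 * @mmu: MMU that will own the pagetable
 * @name: Name (id) to assign to the new pagetable
 *
 * Allocate the pagetable, initialize its refcount, lock and statistics,
 * let the MMU backend set up its page tables, then add it to the global
 * list and create its sysfs entries. Returns an ERR_PTR() on failure.
 */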
struct kgsl_pagetable *
kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu, unsigned int name)
{
        int status = 0;
        struct kgsl_pagetable *pagetable = NULL;
        unsigned long flags;

        pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
        if (pagetable == NULL)
                return ERR_PTR(-ENOMEM);

        kref_init(&pagetable->refcount);

        spin_lock_init(&pagetable->lock);

        pagetable->mmu = mmu;
        pagetable->name = name;

        atomic_set(&pagetable->stats.entries, 0);
        atomic_long_set(&pagetable->stats.mapped, 0);
        atomic_long_set(&pagetable->stats.max_mapped, 0);

        if (MMU_OP_VALID(mmu, mmu_init_pt)) {
                status = mmu->mmu_ops->mmu_init_pt(mmu, pagetable);
                if (status) {
                        kfree(pagetable);
                        return ERR_PTR(status);
                }
        }

        spin_lock_irqsave(&kgsl_driver.ptlock, flags);
        list_add(&pagetable->list, &kgsl_driver.pagetable_list);
        spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

        /* Create the sysfs entries */
        pagetable_add_sysfs_objects(pagetable);

        return pagetable;
}

void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
        kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);

/**
 * kgsl_mmu_find_svm_region() - Find an empty spot in the SVM region
 * @pagetable: KGSL pagetable to search
 * @start: start of search range, must be within kgsl_mmu_svm_range()
 * @end: end of search range, must be within kgsl_mmu_svm_range()
 * @size: Size of the region to find
 * @align: Desired alignment of the address
 */
uint64_t kgsl_mmu_find_svm_region(struct kgsl_pagetable *pagetable,
                uint64_t start, uint64_t end, uint64_t size,
                uint64_t align)
{
        if (PT_OP_VALID(pagetable, find_svm_region))
                return pagetable->pt_ops->find_svm_region(pagetable, start,
                                end, size, align);
        return -ENOMEM;
}

/**
 * kgsl_mmu_set_svm_region() - Check if a region is empty and reserve it if so
 * @pagetable: KGSL pagetable to search
 * @gpuaddr: GPU address to check/reserve
 * @size: Size of the region to check/reserve
 */
int kgsl_mmu_set_svm_region(struct kgsl_pagetable *pagetable, uint64_t gpuaddr,
                uint64_t size)
{
        if (PT_OP_VALID(pagetable, set_svm_region))
                return pagetable->pt_ops->set_svm_region(pagetable, gpuaddr,
                                size);
        return -ENOMEM;
}

/**
 * kgsl_mmu_get_gpuaddr() - Assign a GPU address to the memdesc
 * @pagetable: GPU pagetable to assign the address in
 * @memdesc: Memory descriptor to assign the address to
 */
int
kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
                struct kgsl_memdesc *memdesc)
{
        if (PT_OP_VALID(pagetable, get_gpuaddr))
                return pagetable->pt_ops->get_gpuaddr(pagetable, memdesc);

        return -ENOMEM;
}
EXPORT_SYMBOL(kgsl_mmu_get_gpuaddr);

int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
                struct kgsl_memdesc *memdesc)
{
        uint64_t size;

        if (!memdesc->gpuaddr)
                return -EINVAL;
        if (!(memdesc->flags & (KGSL_MEMFLAGS_SPARSE_VIRT |
                        KGSL_MEMFLAGS_SPARSE_PHYS))) {
                /* Only global mappings should be mapped multiple times */
                if (!kgsl_memdesc_is_global(memdesc) &&
                                (KGSL_MEMDESC_MAPPED & memdesc->priv))
                        return -EINVAL;
        }

        size = kgsl_memdesc_footprint(memdesc);

        if (PT_OP_VALID(pagetable, mmu_map)) {
                int ret;

                ret = pagetable->pt_ops->mmu_map(pagetable, memdesc);
                if (ret)
                        return ret;

                atomic_inc(&pagetable->stats.entries);
                KGSL_STATS_ADD(size, &pagetable->stats.mapped,
                                &pagetable->stats.max_mapped);

                /* This is needed for non-sparse mappings */
                memdesc->priv |= KGSL_MEMDESC_MAPPED;
        }

        return 0;
}
EXPORT_SYMBOL(kgsl_mmu_map);

/**
 * kgsl_mmu_put_gpuaddr() - Remove a GPU address from its pagetable
 * @memdesc: Memory descriptor containing the GPU address to free
 */
void kgsl_mmu_put_gpuaddr(struct kgsl_memdesc *memdesc)
{
        struct kgsl_pagetable *pagetable = memdesc->pagetable;
        int unmap_fail = 0;

        if (memdesc->size == 0 || memdesc->gpuaddr == 0)
                return;

        if (!kgsl_memdesc_is_global(memdesc))
                unmap_fail = kgsl_mmu_unmap(pagetable, memdesc);

        /*
         * Do not free the gpuaddr/size if unmap fails. If we try to map
         * this range again later, the IOMMU driver will BUG_ON() because
         * it would be overwriting an existing mapping.
         */
        if (PT_OP_VALID(pagetable, put_gpuaddr) && (unmap_fail == 0))
                pagetable->pt_ops->put_gpuaddr(memdesc);

        if (!kgsl_memdesc_is_global(memdesc))
                memdesc->gpuaddr = 0;

        memdesc->pagetable = NULL;
}
EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr);

/**
 * kgsl_mmu_svm_range() - Return the range for SVM (if applicable)
 * @pagetable: Pagetable to query the range from
 * @lo: Pointer to store the start of the SVM range
 * @hi: Pointer to store the end of the SVM range
 * @memflags: Flags from the buffer we are mapping
 */
int kgsl_mmu_svm_range(struct kgsl_pagetable *pagetable,
                uint64_t *lo, uint64_t *hi, uint64_t memflags)
{
        if (PT_OP_VALID(pagetable, svm_range))
                return pagetable->pt_ops->svm_range(pagetable, lo, hi,
                                memflags);

        return -ENODEV;
}
EXPORT_SYMBOL(kgsl_mmu_svm_range);

int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
                struct kgsl_memdesc *memdesc)
{
        int ret = 0;

        if (memdesc->size == 0)
                return -EINVAL;

        if (!(memdesc->flags & (KGSL_MEMFLAGS_SPARSE_VIRT |
                        KGSL_MEMFLAGS_SPARSE_PHYS))) {
                /* Only global mappings should be mapped multiple times */
                if (!(KGSL_MEMDESC_MAPPED & memdesc->priv))
                        return -EINVAL;
        }

        if (PT_OP_VALID(pagetable, mmu_unmap)) {
                uint64_t size;

                size = kgsl_memdesc_footprint(memdesc);

                ret = pagetable->pt_ops->mmu_unmap(pagetable, memdesc);

                atomic_dec(&pagetable->stats.entries);
                atomic_long_sub(size, &pagetable->stats.mapped);

                if (!kgsl_memdesc_is_global(memdesc))
                        memdesc->priv &= ~KGSL_MEMDESC_MAPPED;
        }

        return ret;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);

int kgsl_mmu_map_offset(struct kgsl_pagetable *pagetable,
                uint64_t virtaddr, uint64_t virtoffset,
                struct kgsl_memdesc *memdesc, uint64_t physoffset,
                uint64_t size, uint64_t flags)
{
        if (PT_OP_VALID(pagetable, mmu_map_offset)) {
                int ret;

                ret = pagetable->pt_ops->mmu_map_offset(pagetable, virtaddr,
                                virtoffset, memdesc, physoffset, size, flags);
                if (ret)
                        return ret;

                atomic_inc(&pagetable->stats.entries);
                KGSL_STATS_ADD(size, &pagetable->stats.mapped,
                                &pagetable->stats.max_mapped);
        }

        return 0;
}
EXPORT_SYMBOL(kgsl_mmu_map_offset);

int kgsl_mmu_unmap_offset(struct kgsl_pagetable *pagetable,
                struct kgsl_memdesc *memdesc, uint64_t addr, uint64_t offset,
                uint64_t size)
{
        if (PT_OP_VALID(pagetable, mmu_unmap_offset)) {
                int ret;

                ret = pagetable->pt_ops->mmu_unmap_offset(pagetable, memdesc,
                                addr, offset, size);
                if (ret)
                        return ret;

                atomic_dec(&pagetable->stats.entries);
                atomic_long_sub(size, &pagetable->stats.mapped);
        }

        return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap_offset);

int kgsl_mmu_sparse_dummy_map(struct kgsl_pagetable *pagetable,
                struct kgsl_memdesc *memdesc, uint64_t offset, uint64_t size)
{
        if (PT_OP_VALID(pagetable, mmu_sparse_dummy_map)) {
                int ret;

                ret = pagetable->pt_ops->mmu_sparse_dummy_map(pagetable,
                                memdesc, offset, size);
                if (ret)
                        return ret;

                atomic_dec(&pagetable->stats.entries);
                atomic_long_sub(size, &pagetable->stats.mapped);
        }

        return 0;
}
EXPORT_SYMBOL(kgsl_mmu_sparse_dummy_map);

void kgsl_mmu_remove_global(struct kgsl_device *device,
                struct kgsl_memdesc *memdesc)
{
        struct kgsl_mmu *mmu = &device->mmu;

        if (MMU_OP_VALID(mmu, mmu_remove_global))
                mmu->mmu_ops->mmu_remove_global(mmu, memdesc);
}
EXPORT_SYMBOL(kgsl_mmu_remove_global);

void kgsl_mmu_add_global(struct kgsl_device *device,
                struct kgsl_memdesc *memdesc, const char *name)
{
        struct kgsl_mmu *mmu = &device->mmu;

        if (MMU_OP_VALID(mmu, mmu_add_global))
                mmu->mmu_ops->mmu_add_global(mmu, memdesc, name);
}
EXPORT_SYMBOL(kgsl_mmu_add_global);

void kgsl_mmu_close(struct kgsl_device *device)
{
        struct kgsl_mmu *mmu = &(device->mmu);

        if (MMU_OP_VALID(mmu, mmu_close))
                mmu->mmu_ops->mmu_close(mmu);
}
EXPORT_SYMBOL(kgsl_mmu_close);

enum kgsl_mmutype kgsl_mmu_get_mmutype(struct kgsl_device *device)
{
        return device ? device->mmu.type : KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);

bool kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pagetable,
                uint64_t gpuaddr)
{
        if (PT_OP_VALID(pagetable, addr_in_range))
                return pagetable->pt_ops->addr_in_range(pagetable, gpuaddr);

        return false;
}
EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range);

struct kgsl_memdesc *kgsl_mmu_get_qdss_global_entry(struct kgsl_device *device)
{
        struct kgsl_mmu *mmu = &device->mmu;

        if (MMU_OP_VALID(mmu, mmu_get_qdss_global_entry))
                return mmu->mmu_ops->mmu_get_qdss_global_entry();

        return NULL;
}
EXPORT_SYMBOL(kgsl_mmu_get_qdss_global_entry);

struct kgsl_memdesc *kgsl_mmu_get_qtimer_global_entry(
                struct kgsl_device *device)
{
        struct kgsl_mmu *mmu = &device->mmu;

        if (MMU_OP_VALID(mmu, mmu_get_qtimer_global_entry))
                return mmu->mmu_ops->mmu_get_qtimer_global_entry();

        return NULL;
}
EXPORT_SYMBOL(kgsl_mmu_get_qtimer_global_entry);

/*
 * NOMMU definitions - NOMMU really just means that the MMU is kept in pass
 * through and the GPU directly accesses physical memory. Used in debug mode
 * and when a real MMU isn't up and running yet.
 */

static bool nommu_gpuaddr_in_range(struct kgsl_pagetable *pagetable,
                uint64_t gpuaddr)
{
        return gpuaddr != 0;
}

static int nommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
                struct kgsl_memdesc *memdesc)
{
        if (memdesc->sgt->nents > 1) {
                WARN_ONCE(1,
                        "Attempt to map non-contiguous memory with NOMMU\n");
                return -EINVAL;
        }

        memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sgt->sgl);

        if (memdesc->gpuaddr) {
                memdesc->pagetable = pagetable;
                return 0;
        }

        return -ENOMEM;
}

static struct kgsl_mmu_pt_ops nommu_pt_ops = {
        .get_gpuaddr = nommu_get_gpuaddr,
        .addr_in_range = nommu_gpuaddr_in_range,
};

static void nommu_add_global(struct kgsl_mmu *mmu,
                struct kgsl_memdesc *memdesc, const char *name)
{
        memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sgt->sgl);
}

static void nommu_remove_global(struct kgsl_mmu *mmu,
                struct kgsl_memdesc *memdesc)
{
        memdesc->gpuaddr = 0;
}

static int nommu_init_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
        if (pt == NULL)
                return -EINVAL;

        pt->pt_ops = &nommu_pt_ops;
        return 0;
}

static struct kgsl_pagetable *nommu_getpagetable(struct kgsl_mmu *mmu,
                unsigned long name)
{
        struct kgsl_pagetable *pagetable;

        pagetable = kgsl_get_pagetable(KGSL_MMU_GLOBAL_PT);

        if (pagetable == NULL)
                pagetable = kgsl_mmu_createpagetableobject(mmu,
                                KGSL_MMU_GLOBAL_PT);

        return pagetable;
}

static int nommu_init(struct kgsl_mmu *mmu)
{
        mmu->features |= KGSL_MMU_GLOBAL_PAGETABLE;
        return 0;
}

static int nommu_probe(struct kgsl_device *device)
{
        /* NOMMU always exists */
        return 0;
}

static struct kgsl_mmu_ops kgsl_nommu_ops = {
        .mmu_init = nommu_init,
        .mmu_add_global = nommu_add_global,
        .mmu_remove_global = nommu_remove_global,
        .mmu_init_pt = nommu_init_pt,
        .mmu_getpagetable = nommu_getpagetable,
        .probe = nommu_probe,
};

static struct {
        const char *name;
        unsigned int type;
        struct kgsl_mmu_ops *ops;
} kgsl_mmu_subtypes[] = {
#ifdef CONFIG_QCOM_KGSL_IOMMU
        { "iommu", KGSL_MMU_TYPE_IOMMU, &kgsl_iommu_ops },
#endif
        { "nommu", KGSL_MMU_TYPE_NONE, &kgsl_nommu_ops },
};

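/*
 * Pick and initialize an MMU implementation for the device. A requested
 * type is probed by name first; if the name is unknown, or no type was
 * requested, each subtype is probed in order (IOMMU first when enabled,
 * NOMMU as the fallback) and the first successful probe wins.
 */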
int kgsl_mmu_probe(struct kgsl_device *device, char *mmutype)
{
        struct kgsl_mmu *mmu = &device->mmu;
        int ret, i;

        if (mmutype != NULL) {
                for (i = 0; i < ARRAY_SIZE(kgsl_mmu_subtypes); i++) {
                        if (strcmp(kgsl_mmu_subtypes[i].name, mmutype))
                                continue;

                        ret = kgsl_mmu_subtypes[i].ops->probe(device);

                        if (ret == 0) {
                                mmu->type = kgsl_mmu_subtypes[i].type;
                                mmu->mmu_ops = kgsl_mmu_subtypes[i].ops;

                                if (MMU_OP_VALID(mmu, mmu_init))
                                        return mmu->mmu_ops->mmu_init(mmu);
                        }

                        return ret;
                }

                KGSL_CORE_ERR("mmu: MMU type '%s' unknown\n", mmutype);
        }

        for (i = 0; i < ARRAY_SIZE(kgsl_mmu_subtypes); i++) {
                ret = kgsl_mmu_subtypes[i].ops->probe(device);

                if (ret == 0) {
                        mmu->type = kgsl_mmu_subtypes[i].type;
                        mmu->mmu_ops = kgsl_mmu_subtypes[i].ops;

                        if (MMU_OP_VALID(mmu, mmu_init))
                                return mmu->mmu_ops->mmu_init(mmu);

                        return 0;
                }
        }

        KGSL_CORE_ERR("mmu: couldn't detect any known MMU types\n");
        return -ENODEV;
}
EXPORT_SYMBOL(kgsl_mmu_probe);