/* Copyright (c) 2002,2007-2017,2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/export.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);

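/*
 * Page table teardown is split in two: kgsl_destroy_pagetable() (the kref
 * release callback) detaches the page table and schedules destroy_ws, and
 * _deferred_destroy() runs from worker context to destroy the hardware page
 * table and free the structure.
 */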
static void _deferred_destroy(struct work_struct *ws)
{
	struct kgsl_pagetable *pagetable = container_of(ws,
					struct kgsl_pagetable, destroy_ws);

	if (PT_OP_VALID(pagetable, mmu_destroy_pagetable))
		pagetable->pt_ops->mmu_destroy_pagetable(pagetable);

	kfree(pagetable);
}

static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
					struct kgsl_pagetable, refcount);

	kgsl_mmu_detach_pagetable(pagetable);

	kgsl_schedule_work(&pagetable->destroy_ws);
}

static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
	if (pagetable)
		kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}

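/**
 * kgsl_get_pagetable() - Look up a page table by name and take a reference
 * @name: Name (id) of the page table to find
 *
 * Returns the page table with an elevated refcount, or NULL if no page table
 * with that name exists. The caller must drop the reference with
 * kgsl_mmu_putpagetable().
 */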
struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (name == pt->name && kref_get_unless_zero(&pt->refcount)) {
			ret = pt;
			break;
		}
	}

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
	return ret;
}

static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
	unsigned int ptname;

	if (!kobj)
		return NULL;

	if (kstrtou32(kobj->name, 0, &ptname))
		return NULL;

	return kgsl_get_pagetable(ptname);
}

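/*
 * sysfs show handlers for the per-pagetable statistics. Each page table gets
 * a kobject named after its id (see pagetable_add_sysfs_objects()) with
 * read-only "entries", "mapped" and "max_mapped" attributes backed by the
 * counters in pagetable->stats.
 */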
static ssize_t
sysfs_show_entries(struct kobject *kobj,
		   struct kobj_attribute *attr,
		   char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt) {
		unsigned int val = atomic_read(&pt->stats.entries);

		ret += snprintf(buf, PAGE_SIZE, "%d\n", val);
	}

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_mapped(struct kobject *kobj,
		  struct kobj_attribute *attr,
		  char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt) {
		uint64_t val = atomic_long_read(&pt->stats.mapped);

		ret += snprintf(buf, PAGE_SIZE, "%llu\n", val);
	}

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_mapped(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt) {
		uint64_t val = atomic_long_read(&pt->stats.max_mapped);

		ret += snprintf(buf, PAGE_SIZE, "%llu\n", val);
	}

	kgsl_put_pagetable(pt);
	return ret;
}

static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_max_mapped.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};

static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	if (pagetable->kobj)
		sysfs_remove_group(pagetable->kobj,
				   &pagetable_attr_group);

	kobject_put(pagetable->kobj);
	pagetable->kobj = NULL;
}

static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
						 kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}

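/**
 * kgsl_mmu_detach_pagetable() - Remove a page table from the global list
 * @pagetable: Page table to detach
 *
 * Unlink the page table from kgsl_driver.pagetable_list and remove its sysfs
 * nodes. This is called from the kref release path before the deferred free.
 */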
void
kgsl_mmu_detach_pagetable(struct kgsl_pagetable *pagetable)
{
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);

	if (!list_empty(&pagetable->list))
		list_del_init(&pagetable->list);

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);
}

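/**
 * kgsl_mmu_get_pt_from_ptname() - Look up a page table by name
 * @mmu: MMU handle (not used by the lookup itself)
 * @ptname: Name (id) of the page table to find
 *
 * Unlike kgsl_get_pagetable() this does not take a reference on the page
 * table, so the caller must ensure it cannot be destroyed while the pointer
 * is in use. Returns NULL if no page table with that name exists.
 */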
struct kgsl_pagetable *kgsl_mmu_get_pt_from_ptname(struct kgsl_mmu *mmu,
						   int ptname)
{
	struct kgsl_pagetable *pt;

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == ptname) {
			spin_unlock(&kgsl_driver.ptlock);
			return pt;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);
	return NULL;
}
EXPORT_SYMBOL(kgsl_mmu_get_pt_from_ptname);

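/**
 * kgsl_mmu_log_fault_addr() - Track the last faulting page per page table
 * @mmu: MMU handle
 * @pt_base: Hardware base address identifying the faulting page table
 * @addr: Faulting GPU address
 *
 * Returns 1 if the last recorded fault on the matching page table was on the
 * same page as @addr, so callers can avoid logging the same fault repeatedly;
 * otherwise records the page in pt->fault_addr and returns 0.
 */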
unsigned int
kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, u64 pt_base,
			uint64_t addr)
{
	struct kgsl_pagetable *pt;
	unsigned int ret = 0;

	if (!MMU_OP_VALID(mmu, mmu_pt_equal))
		return 0;

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
			if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
				ret = 1;
				break;
			}
			pt->fault_addr = (addr & ~(PAGE_SIZE-1));
			ret = 0;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_log_fault_addr);

int kgsl_mmu_init(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (MMU_OP_VALID(mmu, mmu_init))
		return mmu->mmu_ops->mmu_init(mmu);

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_init);

int kgsl_mmu_start(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (MMU_OP_VALID(mmu, mmu_start))
		return mmu->mmu_ops->mmu_start(mmu);

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_start);

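/**
 * kgsl_mmu_createpagetableobject() - Allocate and register a new page table
 * @mmu: MMU that the page table belongs to
 * @name: Name (id) for the new page table
 *
 * Allocate a struct kgsl_pagetable, initialize its refcount, lock and
 * statistics, let the MMU backend set up the hardware page table via
 * mmu_init_pt(), add it to the global page table list and create its sysfs
 * entries. Returns an ERR_PTR() on failure.
 */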
struct kgsl_pagetable *
kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu, unsigned int name)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	unsigned long flags;

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&pagetable->refcount);

	spin_lock_init(&pagetable->lock);
	INIT_WORK(&pagetable->destroy_ws, _deferred_destroy);

	pagetable->mmu = mmu;
	pagetable->name = name;

	atomic_set(&pagetable->stats.entries, 0);
	atomic_long_set(&pagetable->stats.mapped, 0);
	atomic_long_set(&pagetable->stats.max_mapped, 0);

	if (MMU_OP_VALID(mmu, mmu_init_pt)) {
		status = mmu->mmu_ops->mmu_init_pt(mmu, pagetable);
		if (status) {
			kfree(pagetable);
			return ERR_PTR(status);
		}
	}

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	/* Create the sysfs entries */
	pagetable_add_sysfs_objects(pagetable);

	return pagetable;
}

void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);

/**
 * kgsl_mmu_find_svm_region() - Find an empty spot in the SVM region
 * @pagetable: KGSL pagetable to search
 * @start: start of search range, must be within kgsl_mmu_svm_range()
 * @end: end of search range, must be within kgsl_mmu_svm_range()
 * @size: Size of the region to find
 * @align: Desired alignment of the address
 */
uint64_t kgsl_mmu_find_svm_region(struct kgsl_pagetable *pagetable,
				  uint64_t start, uint64_t end, uint64_t size,
				  uint64_t align)
{
	if (PT_OP_VALID(pagetable, find_svm_region))
		return pagetable->pt_ops->find_svm_region(pagetable, start,
							  end, size, align);
	return -ENOMEM;
}

/**
 * kgsl_mmu_set_svm_region() - Check if a region is empty and reserve it if so
 * @pagetable: KGSL pagetable to search
 * @gpuaddr: GPU address to check/reserve
 * @size: Size of the region to check/reserve
 */
int kgsl_mmu_set_svm_region(struct kgsl_pagetable *pagetable, uint64_t gpuaddr,
			    uint64_t size)
{
	if (PT_OP_VALID(pagetable, set_svm_region))
		return pagetable->pt_ops->set_svm_region(pagetable, gpuaddr,
							 size);
	return -ENOMEM;
}

/**
 * kgsl_mmu_get_gpuaddr() - Assign a GPU address to the memdesc
 * @pagetable: GPU pagetable to assign the address in
 * @memdesc: mem descriptor to assign the memory to
 */
int
kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
		     struct kgsl_memdesc *memdesc)
{
	if (PT_OP_VALID(pagetable, get_gpuaddr))
		return pagetable->pt_ops->get_gpuaddr(pagetable, memdesc);

	return -ENOMEM;
}
EXPORT_SYMBOL(kgsl_mmu_get_gpuaddr);

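/**
 * kgsl_mmu_map() - Map a memory descriptor into a pagetable
 * @pagetable: Pagetable to map the memory into
 * @memdesc: Memory descriptor to map; memdesc->gpuaddr must already be set
 *
 * Map the backing memory at memdesc->gpuaddr and update the pagetable
 * statistics. Non-global, non-sparse buffers may only be mapped once and are
 * marked with KGSL_MEMDESC_MAPPED.
 */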
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
	     struct kgsl_memdesc *memdesc)
{
	uint64_t size;

	if (!memdesc->gpuaddr)
		return -EINVAL;
	if (!(memdesc->flags & (KGSL_MEMFLAGS_SPARSE_VIRT |
				KGSL_MEMFLAGS_SPARSE_PHYS))) {
		/* Only global mappings should be mapped multiple times */
		if (!kgsl_memdesc_is_global(memdesc) &&
		    (KGSL_MEMDESC_MAPPED & memdesc->priv))
			return -EINVAL;
	}

	size = kgsl_memdesc_footprint(memdesc);

	if (PT_OP_VALID(pagetable, mmu_map)) {
		int ret;

		ret = pagetable->pt_ops->mmu_map(pagetable, memdesc);
		if (ret)
			return ret;

		atomic_inc(&pagetable->stats.entries);
		KGSL_STATS_ADD(size, &pagetable->stats.mapped,
			       &pagetable->stats.max_mapped);

		/* This is needed for non-sparse mappings */
		memdesc->priv |= KGSL_MEMDESC_MAPPED;
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_map);

/**
 * kgsl_mmu_put_gpuaddr() - Remove a GPU address from a pagetable
 * @memdesc: Memory descriptor containing the GPU address to free
 *
 * The pagetable is taken from memdesc->pagetable.
 */
void kgsl_mmu_put_gpuaddr(struct kgsl_memdesc *memdesc)
{
	struct kgsl_pagetable *pagetable = memdesc->pagetable;
	int unmap_fail = 0;

	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return;

	if (!kgsl_memdesc_is_global(memdesc) &&
	    (KGSL_MEMDESC_MAPPED & memdesc->priv))
		unmap_fail = kgsl_mmu_unmap(pagetable, memdesc);

	/*
	 * Do not free the gpuaddr/size if the unmap failed. If we try to
	 * map this range again in the future, the IOMMU driver will throw
	 * a BUG_ON() because it thinks we are overwriting an existing
	 * mapping.
	 */
	if (PT_OP_VALID(pagetable, put_gpuaddr) && (unmap_fail == 0))
		pagetable->pt_ops->put_gpuaddr(memdesc);

	if (!kgsl_memdesc_is_global(memdesc))
		memdesc->gpuaddr = 0;

	memdesc->pagetable = NULL;
}
EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr);

/**
 * kgsl_mmu_svm_range() - Return the range for SVM (if applicable)
 * @pagetable: Pagetable to query the range from
 * @lo: Pointer to store the start of the SVM range
 * @hi: Pointer to store the end of the SVM range
 * @memflags: Flags from the buffer we are mapping
 */
int kgsl_mmu_svm_range(struct kgsl_pagetable *pagetable,
		       uint64_t *lo, uint64_t *hi, uint64_t memflags)
{
	if (PT_OP_VALID(pagetable, svm_range))
		return pagetable->pt_ops->svm_range(pagetable, lo, hi,
						    memflags);

	return -ENODEV;
}
EXPORT_SYMBOL(kgsl_mmu_svm_range);

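/**
 * kgsl_mmu_unmap() - Remove a memory descriptor's mapping from a pagetable
 * @pagetable: Pagetable to remove the mapping from
 * @memdesc: Memory descriptor that is currently mapped
 *
 * Unmap the buffer and update the pagetable statistics. For non-global
 * buffers the KGSL_MEMDESC_MAPPED flag is cleared.
 */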
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
	       struct kgsl_memdesc *memdesc)
{
	int ret = 0;

	if (memdesc->size == 0)
		return -EINVAL;

	if (!(memdesc->flags & (KGSL_MEMFLAGS_SPARSE_VIRT |
				KGSL_MEMFLAGS_SPARSE_PHYS))) {
		/* Only global mappings should be mapped multiple times */
		if (!(KGSL_MEMDESC_MAPPED & memdesc->priv))
			return -EINVAL;
	}

	if (PT_OP_VALID(pagetable, mmu_unmap)) {
		uint64_t size;

		size = kgsl_memdesc_footprint(memdesc);

		ret = pagetable->pt_ops->mmu_unmap(pagetable, memdesc);

		atomic_dec(&pagetable->stats.entries);
		atomic_long_sub(size, &pagetable->stats.mapped);

		if (!kgsl_memdesc_is_global(memdesc))
			memdesc->priv &= ~KGSL_MEMDESC_MAPPED;
	}

	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);

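/*
 * Sparse/offset mapping helpers. kgsl_mmu_map_offset() maps @size bytes of
 * @memdesc starting at @physoffset into the virtual range beginning at
 * @virtaddr + @virtoffset, and kgsl_mmu_unmap_offset() removes such a
 * mapping. kgsl_mmu_sparse_dummy_map() forwards a range of @memdesc to the
 * backend's mmu_sparse_dummy_map op. All three keep the pagetable statistics
 * up to date.
 */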
int kgsl_mmu_map_offset(struct kgsl_pagetable *pagetable,
			uint64_t virtaddr, uint64_t virtoffset,
			struct kgsl_memdesc *memdesc, uint64_t physoffset,
			uint64_t size, uint64_t flags)
{
	if (PT_OP_VALID(pagetable, mmu_map_offset)) {
		int ret;

		ret = pagetable->pt_ops->mmu_map_offset(pagetable, virtaddr,
				virtoffset, memdesc, physoffset, size, flags);
		if (ret)
			return ret;

		atomic_inc(&pagetable->stats.entries);
		KGSL_STATS_ADD(size, &pagetable->stats.mapped,
			       &pagetable->stats.max_mapped);
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_map_offset);

int kgsl_mmu_unmap_offset(struct kgsl_pagetable *pagetable,
			  struct kgsl_memdesc *memdesc, uint64_t addr,
			  uint64_t offset, uint64_t size)
{
	if (PT_OP_VALID(pagetable, mmu_unmap_offset)) {
		int ret;

		ret = pagetable->pt_ops->mmu_unmap_offset(pagetable, memdesc,
							  addr, offset, size);
		if (ret)
			return ret;

		atomic_dec(&pagetable->stats.entries);
		atomic_long_sub(size, &pagetable->stats.mapped);
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap_offset);

int kgsl_mmu_sparse_dummy_map(struct kgsl_pagetable *pagetable,
			      struct kgsl_memdesc *memdesc, uint64_t offset,
			      uint64_t size)
{
	if (PT_OP_VALID(pagetable, mmu_sparse_dummy_map)) {
		int ret;

		ret = pagetable->pt_ops->mmu_sparse_dummy_map(pagetable,
							memdesc, offset, size);
		if (ret)
			return ret;

		atomic_dec(&pagetable->stats.entries);
		atomic_long_sub(size, &pagetable->stats.mapped);
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_sparse_dummy_map);

void kgsl_mmu_remove_global(struct kgsl_device *device,
			    struct kgsl_memdesc *memdesc)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (MMU_OP_VALID(mmu, mmu_remove_global))
		mmu->mmu_ops->mmu_remove_global(mmu, memdesc);
}
EXPORT_SYMBOL(kgsl_mmu_remove_global);

void kgsl_mmu_add_global(struct kgsl_device *device,
			 struct kgsl_memdesc *memdesc, const char *name)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (MMU_OP_VALID(mmu, mmu_add_global))
		mmu->mmu_ops->mmu_add_global(mmu, memdesc, name);
}
EXPORT_SYMBOL(kgsl_mmu_add_global);

void kgsl_mmu_close(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &(device->mmu);

	if (MMU_OP_VALID(mmu, mmu_close))
		mmu->mmu_ops->mmu_close(mmu);
}
EXPORT_SYMBOL(kgsl_mmu_close);

enum kgsl_mmutype kgsl_mmu_get_mmutype(struct kgsl_device *device)
{
	return device ? device->mmu.type : KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);

bool kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pagetable,
			       uint64_t gpuaddr)
{
	if (PT_OP_VALID(pagetable, addr_in_range))
		return pagetable->pt_ops->addr_in_range(pagetable, gpuaddr);

	return false;
}
EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range);

struct kgsl_memdesc *kgsl_mmu_get_qdss_global_entry(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (MMU_OP_VALID(mmu, mmu_get_qdss_global_entry))
		return mmu->mmu_ops->mmu_get_qdss_global_entry();

	return NULL;
}
EXPORT_SYMBOL(kgsl_mmu_get_qdss_global_entry);

struct kgsl_memdesc *kgsl_mmu_get_qtimer_global_entry(
		struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (MMU_OP_VALID(mmu, mmu_get_qtimer_global_entry))
		return mmu->mmu_ops->mmu_get_qtimer_global_entry();

	return NULL;
}
EXPORT_SYMBOL(kgsl_mmu_get_qtimer_global_entry);

/*
 * NOMMU definitions - NOMMU really just means that the MMU is kept in pass
 * through and the GPU directly accesses physical memory. Used in debug mode
 * and when a real MMU isn't up and running yet.
 */

static bool nommu_gpuaddr_in_range(struct kgsl_pagetable *pagetable,
				   uint64_t gpuaddr)
{
	return (gpuaddr != 0) ? true : false;
}

static int nommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
			     struct kgsl_memdesc *memdesc)
{
	if (memdesc->sgt->nents > 1) {
		WARN_ONCE(1,
			"Attempt to map non-contiguous memory with NOMMU\n");
		return -EINVAL;
	}

	memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sgt->sgl);

	if (memdesc->gpuaddr) {
		memdesc->pagetable = pagetable;
		return 0;
	}

	return -ENOMEM;
}

static struct kgsl_mmu_pt_ops nommu_pt_ops = {
	.get_gpuaddr = nommu_get_gpuaddr,
	.addr_in_range = nommu_gpuaddr_in_range,
};

static void nommu_add_global(struct kgsl_mmu *mmu,
			     struct kgsl_memdesc *memdesc, const char *name)
{
	memdesc->gpuaddr = (uint64_t) sg_phys(memdesc->sgt->sgl);
}

static void nommu_remove_global(struct kgsl_mmu *mmu,
				struct kgsl_memdesc *memdesc)
{
	memdesc->gpuaddr = 0;
}

static int nommu_init_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
	if (pt == NULL)
		return -EINVAL;

	pt->pt_ops = &nommu_pt_ops;
	return 0;
}

static struct kgsl_pagetable *nommu_getpagetable(struct kgsl_mmu *mmu,
						 unsigned long name)
{
	struct kgsl_pagetable *pagetable;

	pagetable = kgsl_get_pagetable(KGSL_MMU_GLOBAL_PT);

	if (pagetable == NULL)
		pagetable = kgsl_mmu_createpagetableobject(mmu,
							   KGSL_MMU_GLOBAL_PT);

	return pagetable;
}

static int nommu_init(struct kgsl_mmu *mmu)
{
	mmu->features |= KGSL_MMU_GLOBAL_PAGETABLE;
	return 0;
}

static int nommu_probe(struct kgsl_device *device)
{
	/* NOMMU always exists */
	return 0;
}

static struct kgsl_mmu_ops kgsl_nommu_ops = {
	.mmu_init = nommu_init,
	.mmu_add_global = nommu_add_global,
	.mmu_remove_global = nommu_remove_global,
	.mmu_init_pt = nommu_init_pt,
	.mmu_getpagetable = nommu_getpagetable,
	.probe = nommu_probe,
};

static struct {
	const char *name;
	unsigned int type;
	struct kgsl_mmu_ops *ops;
} kgsl_mmu_subtypes[] = {
#ifdef CONFIG_QCOM_KGSL_IOMMU
	{ "iommu", KGSL_MMU_TYPE_IOMMU, &kgsl_iommu_ops },
#endif
	{ "nommu", KGSL_MMU_TYPE_NONE, &kgsl_nommu_ops },
};

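/**
 * kgsl_mmu_probe() - Probe for a supported MMU type
 * @device: KGSL device to probe
 * @mmutype: Optional name of the MMU type to force ("iommu" or "nommu")
 *
 * If @mmutype names a known subtype, only that subtype is probed. Otherwise
 * each entry in kgsl_mmu_subtypes[] is tried in order (IOMMU first when it
 * is compiled in, then NOMMU) and the first one that probes successfully is
 * initialized. Returns a negative error code if no MMU could be set up.
 */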
int kgsl_mmu_probe(struct kgsl_device *device, char *mmutype)
{
	struct kgsl_mmu *mmu = &device->mmu;
	int ret, i;

	if (mmutype != NULL) {
		for (i = 0; i < ARRAY_SIZE(kgsl_mmu_subtypes); i++) {
			if (strcmp(kgsl_mmu_subtypes[i].name, mmutype))
				continue;

			ret = kgsl_mmu_subtypes[i].ops->probe(device);

			if (ret == 0) {
				mmu->type = kgsl_mmu_subtypes[i].type;
				mmu->mmu_ops = kgsl_mmu_subtypes[i].ops;

				if (MMU_OP_VALID(mmu, mmu_init))
					return mmu->mmu_ops->mmu_init(mmu);
			}

			return ret;
		}

		KGSL_CORE_ERR("mmu: MMU type '%s' unknown\n", mmutype);
	}

	for (i = 0; i < ARRAY_SIZE(kgsl_mmu_subtypes); i++) {
		ret = kgsl_mmu_subtypes[i].ops->probe(device);

		if (ret == 0) {
			mmu->type = kgsl_mmu_subtypes[i].type;
			mmu->mmu_ops = kgsl_mmu_subtypes[i].ops;

			if (MMU_OP_VALID(mmu, mmu_init))
				return mmu->mmu_ops->mmu_init(mmu);

			return 0;
		}
	}

	KGSL_CORE_ERR("mmu: couldn't detect any known MMU types\n");
	return -ENODEV;
}
EXPORT_SYMBOL(kgsl_mmu_probe);