Jordan Crouse286b1152020-12-30 16:30:50 +05301/* Copyright (c) 2011-2021, The Linux Foundation. All rights reserved.
Sanjay Yadav3c9f3402023-09-14 18:04:52 +05302 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
Shrenuj Bansala419c792016-10-20 14:05:11 -07003 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/types.h>
14#include <linux/delay.h>
15#include <linux/device.h>
16#include <linux/spinlock.h>
17#include <linux/genalloc.h>
18#include <linux/slab.h>
19#include <linux/iommu.h>
20#include <linux/msm_kgsl.h>
21#include <linux/ratelimit.h>
22#include <linux/of_platform.h>
Jordan Crousef7f87a62019-09-11 08:32:15 -060023#include <linux/random.h>
Shrenuj Bansala419c792016-10-20 14:05:11 -070024#include <soc/qcom/scm.h>
25#include <soc/qcom/secure_buffer.h>
Shrenuj Bansala419c792016-10-20 14:05:11 -070026#include <linux/compat.h>
27
28#include "kgsl.h"
29#include "kgsl_device.h"
30#include "kgsl_mmu.h"
31#include "kgsl_sharedmem.h"
32#include "kgsl_iommu.h"
33#include "adreno_pm4types.h"
34#include "adreno.h"
35#include "kgsl_trace.h"
36#include "kgsl_pwrctrl.h"
37
Shrenuj Bansal9a0563b2017-06-15 14:45:15 -070038#define CP_APERTURE_REG 0
Sunil Khatri82eb1ec2018-01-09 15:28:14 +053039#define CP_SMMU_APERTURE_ID 0x1B
Shrenuj Bansal9a0563b2017-06-15 14:45:15 -070040
Shrenuj Bansala419c792016-10-20 14:05:11 -070041#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
42
Deepak Kumar756d6a92017-11-28 16:58:29 +053043#define ADDR_IN_GLOBAL(_mmu, _a) \
44 (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)) && \
45 ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) + \
46 KGSL_IOMMU_GLOBAL_MEM_SIZE)))
Shrenuj Bansala419c792016-10-20 14:05:11 -070047
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -060048/*
49 * Flag to set SMMU memory attributes required to
50 * enable system cache for GPU transactions.
51 */
52#ifndef IOMMU_USE_UPSTREAM_HINT
53#define IOMMU_USE_UPSTREAM_HINT 0
54#endif
55
Shrenuj Bansala419c792016-10-20 14:05:11 -070056static struct kgsl_mmu_pt_ops iommu_pt_ops;
57static bool need_iommu_sync;
58
59const unsigned int kgsl_iommu_reg_list[KGSL_IOMMU_REG_MAX] = {
60 0x0,/* SCTLR */
61 0x20,/* TTBR0 */
62 0x34,/* CONTEXTIDR */
63 0x58,/* FSR */
64 0x60,/* FAR_0 */
65 0x618,/* TLBIALL */
66 0x008,/* RESUME */
67 0x68,/* FSYNR0 */
68 0x6C,/* FSYNR1 */
69 0x7F0,/* TLBSYNC */
70 0x7F4,/* TLBSTATUS */
71};
72
73/*
74 * struct kgsl_iommu_addr_entry - entry in the kgsl_iommu_pt rbtree.
75 * @base: starting virtual address of the entry
76 * @size: size of the entry
77 * @node: the rbtree node
78 *
79 */
80struct kgsl_iommu_addr_entry {
81 uint64_t base;
82 uint64_t size;
83 struct rb_node node;
84};
85
86static struct kmem_cache *addr_entry_cache;
87
88/*
89 * There are certain memory allocations (ringbuffer, memstore, etc) that need to
90 * be present at the same address in every pagetable. We call these "global"
91 * pagetable entries. There are relatively few of these and they are mostly
92 * stable (defined at init time) but the actual number of globals can differ
 93 * slightly depending on the target and implementation.
94 *
95 * Here we define an array and a simple allocator to keep track of the currently
96 * active global entries. Each entry is assigned a unique address inside of a
Jordan Crouse49967ff2019-09-09 10:41:36 -060097 * MMU implementation specific "global" region. We use a simple bitmap based
98 * allocator for the region to allow for both fixed and dynamic addressing.
Shrenuj Bansala419c792016-10-20 14:05:11 -070099 */
100
101#define GLOBAL_PT_ENTRIES 32
102
103struct global_pt_entry {
104 struct kgsl_memdesc *memdesc;
105 char name[32];
106};
107
Jordan Crouse49967ff2019-09-09 10:41:36 -0600108#define GLOBAL_MAP_PAGES (KGSL_IOMMU_GLOBAL_MEM_SIZE >> PAGE_SHIFT)
109
Shrenuj Bansala419c792016-10-20 14:05:11 -0700110static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
Jordan Crouse49967ff2019-09-09 10:41:36 -0600111static DECLARE_BITMAP(global_map, GLOBAL_MAP_PAGES);
112
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600113static int secure_global_size;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700114static int global_pt_count;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700115static struct kgsl_memdesc gpu_qdss_desc;
Jonathan Wicks4892d8d2017-02-24 16:21:26 -0700116static struct kgsl_memdesc gpu_qtimer_desc;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700117
118void kgsl_print_global_pt_entries(struct seq_file *s)
119{
120 int i;
121
122 for (i = 0; i < global_pt_count; i++) {
123 struct kgsl_memdesc *memdesc = global_pt_entries[i].memdesc;
124
125 if (memdesc == NULL)
126 continue;
127
Hareesh Gundu1fbd9062017-11-01 18:47:45 +0530128 seq_printf(s, "0x%pK-0x%pK %16llu %s\n",
129 (uint64_t *)(uintptr_t) memdesc->gpuaddr,
130 (uint64_t *)(uintptr_t) (memdesc->gpuaddr +
131 memdesc->size - 1), memdesc->size,
132 global_pt_entries[i].name);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700133 }
134}
135
136static void kgsl_iommu_unmap_globals(struct kgsl_pagetable *pagetable)
137{
138 unsigned int i;
139
140 for (i = 0; i < global_pt_count; i++) {
141 if (global_pt_entries[i].memdesc != NULL)
142 kgsl_mmu_unmap(pagetable,
143 global_pt_entries[i].memdesc);
144 }
145}
146
147static int kgsl_iommu_map_globals(struct kgsl_pagetable *pagetable)
148{
149 unsigned int i;
150
151 for (i = 0; i < global_pt_count; i++) {
152 if (global_pt_entries[i].memdesc != NULL) {
153 int ret = kgsl_mmu_map(pagetable,
154 global_pt_entries[i].memdesc);
155
156 if (ret)
157 return ret;
158 }
159 }
160
161 return 0;
162}
163
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600164void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_device *device,
Harshdeep Dhattae25cb62018-01-29 10:19:59 -0700165 struct kgsl_memdesc *memdesc)
Shrenuj Bansala419c792016-10-20 14:05:11 -0700166{
Harshdeep Dhattae25cb62018-01-29 10:19:59 -0700167 if (!kgsl_mmu_is_secured(&device->mmu) || memdesc == NULL)
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600168 return;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700169
Harshdeep Dhattae25cb62018-01-29 10:19:59 -0700170 /* Check if an empty memdesc got passed in */
171 if ((memdesc->gpuaddr == 0) || (memdesc->size == 0))
172 return;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700173
Harshdeep Dhattae25cb62018-01-29 10:19:59 -0700174 if (memdesc->pagetable) {
175 if (memdesc->pagetable->name == KGSL_MMU_SECURE_PT)
176 kgsl_mmu_unmap(memdesc->pagetable, memdesc);
177 }
Shrenuj Bansala419c792016-10-20 14:05:11 -0700178}
179
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600180int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
181 struct kgsl_memdesc *entry)
Shrenuj Bansala419c792016-10-20 14:05:11 -0700182{
183 int ret = 0;
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600184
185 if (!kgsl_mmu_is_secured(&device->mmu))
186 return -ENOTSUPP;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700187
188 if (entry != NULL) {
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600189 struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700190 entry->pagetable = pagetable;
Deepak Kumar756d6a92017-11-28 16:58:29 +0530191 entry->gpuaddr = KGSL_IOMMU_SECURE_BASE(&device->mmu) +
192 secure_global_size;
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600193
Shrenuj Bansala419c792016-10-20 14:05:11 -0700194 ret = kgsl_mmu_map(pagetable, entry);
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600195 if (ret == 0)
196 secure_global_size += entry->size;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700197 }
198 return ret;
199}
200
201static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
202 struct kgsl_memdesc *memdesc)
203{
204 int i;
205
206 if (memdesc->gpuaddr == 0 || !(memdesc->priv & KGSL_MEMDESC_GLOBAL))
207 return;
208
209 for (i = 0; i < global_pt_count; i++) {
210 if (global_pt_entries[i].memdesc == memdesc) {
Jordan Crouse49967ff2019-09-09 10:41:36 -0600211 u64 offset = memdesc->gpuaddr -
212 KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
213
214 bitmap_clear(global_map, offset >> PAGE_SHIFT,
215 kgsl_memdesc_footprint(memdesc) >> PAGE_SHIFT);
216
Shrenuj Bansala419c792016-10-20 14:05:11 -0700217 memdesc->gpuaddr = 0;
218 memdesc->priv &= ~KGSL_MEMDESC_GLOBAL;
219 global_pt_entries[i].memdesc = NULL;
220 return;
221 }
222 }
223}
224
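/*
 * Reserve a GPU virtual address for the memdesc in the global region using
 * the global_map bitmap allocator and record it in global_pt_entries[].
 * Entries flagged KGSL_MEMDESC_RANDOM get a randomized starting page so the
 * global buffer does not always land at a predictable address.
 */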
225static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
226 struct kgsl_memdesc *memdesc, const char *name)
227{
Deepak Kumar76704a82021-02-11 14:30:55 +0530228 u32 bit;
Jordan Crouse49967ff2019-09-09 10:41:36 -0600229 u64 size = kgsl_memdesc_footprint(memdesc);
Deepak Kumar76704a82021-02-11 14:30:55 +0530230 int start = 0;
Jordan Crouse49967ff2019-09-09 10:41:36 -0600231
Shrenuj Bansala419c792016-10-20 14:05:11 -0700232 if (memdesc->gpuaddr != 0)
233 return;
234
Jordan Crouse49967ff2019-09-09 10:41:36 -0600235 if (WARN_ON(global_pt_count >= GLOBAL_PT_ENTRIES))
Shrenuj Bansala419c792016-10-20 14:05:11 -0700236 return;
237
Jordan Crousef7f87a62019-09-11 08:32:15 -0600238 if (WARN_ON(size > KGSL_IOMMU_GLOBAL_MEM_SIZE))
239 return;
Jordan Crouse49967ff2019-09-09 10:41:36 -0600240
Jordan Crousef7f87a62019-09-11 08:32:15 -0600241 if (memdesc->priv & KGSL_MEMDESC_RANDOM) {
242 u32 range = GLOBAL_MAP_PAGES - (size >> PAGE_SHIFT);
243
244 start = get_random_int() % range;
245 }
246
247 while (start >= 0) {
248 bit = bitmap_find_next_zero_area(global_map, GLOBAL_MAP_PAGES,
249 start, size >> PAGE_SHIFT, 0);
250
251 if (bit < GLOBAL_MAP_PAGES)
252 break;
253
254 start--;
255 }
256
257 if (WARN_ON(start < 0))
Jordan Crouse49967ff2019-09-09 10:41:36 -0600258 return;
259
260 memdesc->gpuaddr =
261 KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + (bit << PAGE_SHIFT);
262
263 bitmap_set(global_map, bit, size >> PAGE_SHIFT);
Deepak Kumar756d6a92017-11-28 16:58:29 +0530264
Shrenuj Bansala419c792016-10-20 14:05:11 -0700265 memdesc->priv |= KGSL_MEMDESC_GLOBAL;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700266
267 global_pt_entries[global_pt_count].memdesc = memdesc;
268 strlcpy(global_pt_entries[global_pt_count].name, name,
269 sizeof(global_pt_entries[global_pt_count].name));
270 global_pt_count++;
271}
272
Shrenuj Bansala419c792016-10-20 14:05:11 -0700273struct kgsl_memdesc *kgsl_iommu_get_qdss_global_entry(void)
274{
275 return &gpu_qdss_desc;
276}
277
278static void kgsl_setup_qdss_desc(struct kgsl_device *device)
279{
280 int result = 0;
281 uint32_t gpu_qdss_entry[2];
282
283 if (!of_find_property(device->pdev->dev.of_node,
284 "qcom,gpu-qdss-stm", NULL))
285 return;
286
287 if (of_property_read_u32_array(device->pdev->dev.of_node,
288 "qcom,gpu-qdss-stm", gpu_qdss_entry, 2)) {
289 KGSL_CORE_ERR("Failed to read gpu qdss dts entry\n");
290 return;
291 }
292
Lynus Vaz90d98b52018-04-09 14:45:36 +0530293 kgsl_memdesc_init(device, &gpu_qdss_desc, 0);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700294 gpu_qdss_desc.priv = 0;
295 gpu_qdss_desc.physaddr = gpu_qdss_entry[0];
296 gpu_qdss_desc.size = gpu_qdss_entry[1];
297 gpu_qdss_desc.pagetable = NULL;
298 gpu_qdss_desc.ops = NULL;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700299 gpu_qdss_desc.hostptr = NULL;
300
301 result = memdesc_sg_dma(&gpu_qdss_desc, gpu_qdss_desc.physaddr,
302 gpu_qdss_desc.size);
303 if (result) {
304 KGSL_CORE_ERR("memdesc_sg_dma failed: %d\n", result);
305 return;
306 }
307
308 kgsl_mmu_add_global(device, &gpu_qdss_desc, "gpu-qdss");
309}
310
311static inline void kgsl_cleanup_qdss_desc(struct kgsl_mmu *mmu)
312{
313 kgsl_iommu_remove_global(mmu, &gpu_qdss_desc);
314 kgsl_sharedmem_free(&gpu_qdss_desc);
315}
316
Jonathan Wicks4892d8d2017-02-24 16:21:26 -0700317struct kgsl_memdesc *kgsl_iommu_get_qtimer_global_entry(void)
318{
319 return &gpu_qtimer_desc;
320}
321
322static void kgsl_setup_qtimer_desc(struct kgsl_device *device)
323{
324 int result = 0;
325 uint32_t gpu_qtimer_entry[2];
326
327 if (!of_find_property(device->pdev->dev.of_node,
328 "qcom,gpu-qtimer", NULL))
329 return;
330
331 if (of_property_read_u32_array(device->pdev->dev.of_node,
332 "qcom,gpu-qtimer", gpu_qtimer_entry, 2)) {
333 KGSL_CORE_ERR("Failed to read gpu qtimer dts entry\n");
334 return;
335 }
336
Lynus Vaz90d98b52018-04-09 14:45:36 +0530337 kgsl_memdesc_init(device, &gpu_qtimer_desc, 0);
Jonathan Wicks4892d8d2017-02-24 16:21:26 -0700338 gpu_qtimer_desc.priv = 0;
339 gpu_qtimer_desc.physaddr = gpu_qtimer_entry[0];
340 gpu_qtimer_desc.size = gpu_qtimer_entry[1];
341 gpu_qtimer_desc.pagetable = NULL;
342 gpu_qtimer_desc.ops = NULL;
Jonathan Wicks4892d8d2017-02-24 16:21:26 -0700343 gpu_qtimer_desc.hostptr = NULL;
344
345 result = memdesc_sg_dma(&gpu_qtimer_desc, gpu_qtimer_desc.physaddr,
346 gpu_qtimer_desc.size);
347 if (result) {
348 KGSL_CORE_ERR("memdesc_sg_dma failed: %d\n", result);
349 return;
350 }
351
352 kgsl_mmu_add_global(device, &gpu_qtimer_desc, "gpu-qtimer");
353}
354
355static inline void kgsl_cleanup_qtimer_desc(struct kgsl_mmu *mmu)
356{
357 kgsl_iommu_remove_global(mmu, &gpu_qtimer_desc);
358 kgsl_sharedmem_free(&gpu_qtimer_desc);
359}
Shrenuj Bansala419c792016-10-20 14:05:11 -0700360
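/*
 * Take or release the global kgsl_mmu_sync mutex around IOMMU programming
 * when the "qcom,gpu-quirk-iommu-sync" quirk is set; no-op otherwise.
 */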
361static inline void _iommu_sync_mmu_pc(bool lock)
362{
363 if (need_iommu_sync == false)
364 return;
365
366 if (lock)
367 mutex_lock(&kgsl_mmu_sync);
368 else
369 mutex_unlock(&kgsl_mmu_sync);
370}
371
372static void _detach_pt(struct kgsl_iommu_pt *iommu_pt,
373 struct kgsl_iommu_context *ctx)
374{
375 if (iommu_pt->attached) {
376 _iommu_sync_mmu_pc(true);
377 iommu_detach_device(iommu_pt->domain, ctx->dev);
378 _iommu_sync_mmu_pc(false);
379 iommu_pt->attached = false;
380 }
381}
382
383static int _attach_pt(struct kgsl_iommu_pt *iommu_pt,
384 struct kgsl_iommu_context *ctx)
385{
386 int ret;
387
388 if (iommu_pt->attached)
389 return 0;
390
391 _iommu_sync_mmu_pc(true);
392 ret = iommu_attach_device(iommu_pt->domain, ctx->dev);
393 _iommu_sync_mmu_pc(false);
394
395 if (ret == 0)
396 iommu_pt->attached = true;
397
398 return ret;
399}
400
Shrenuj Bansala419c792016-10-20 14:05:11 -0700401static int _iommu_map_sync_pc(struct kgsl_pagetable *pt,
Shrenuj Bansala419c792016-10-20 14:05:11 -0700402 uint64_t gpuaddr, phys_addr_t physaddr,
403 uint64_t size, unsigned int flags)
404{
405 struct kgsl_iommu_pt *iommu_pt = pt->priv;
406 int ret;
407
Shrenuj Bansala419c792016-10-20 14:05:11 -0700408 _iommu_sync_mmu_pc(true);
409
410 ret = iommu_map(iommu_pt->domain, gpuaddr, physaddr, size, flags);
411
412 _iommu_sync_mmu_pc(false);
413
Shrenuj Bansala419c792016-10-20 14:05:11 -0700414 if (ret) {
415 KGSL_CORE_ERR("map err: 0x%016llX, 0x%llx, 0x%x, %d\n",
416 gpuaddr, size, flags, ret);
417 return -ENODEV;
418 }
419
420 return 0;
421}
422
423static int _iommu_unmap_sync_pc(struct kgsl_pagetable *pt,
Carter Coopera1c7cce2017-12-15 13:29:29 -0700424 uint64_t addr, uint64_t size)
Shrenuj Bansala419c792016-10-20 14:05:11 -0700425{
426 struct kgsl_iommu_pt *iommu_pt = pt->priv;
427 size_t unmapped = 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700428
429 _iommu_sync_mmu_pc(true);
430
431 unmapped = iommu_unmap(iommu_pt->domain, addr, size);
432
433 _iommu_sync_mmu_pc(false);
434
Shrenuj Bansala419c792016-10-20 14:05:11 -0700435 if (unmapped != size) {
436 KGSL_CORE_ERR("unmap err: 0x%016llx, 0x%llx, %zd\n",
437 addr, size, unmapped);
438 return -ENODEV;
439 }
440
441 return 0;
442}
443
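/*
 * Map only the [offset, offset + size) window of a scatter-gather list at
 * the given GPU address: walk the list to the starting page, map each chunk
 * with iommu_map(), and unmap whatever was mapped if a chunk fails.
 */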
444static int _iommu_map_sg_offset_sync_pc(struct kgsl_pagetable *pt,
Carter Coopera1c7cce2017-12-15 13:29:29 -0700445 uint64_t addr, struct scatterlist *sg, int nents,
Shrenuj Bansala419c792016-10-20 14:05:11 -0700446 uint64_t offset, uint64_t size, unsigned int flags)
447{
448 struct kgsl_iommu_pt *iommu_pt = pt->priv;
449 uint64_t offset_tmp = offset;
450 uint64_t size_tmp = size;
451 size_t mapped = 0;
452 unsigned int i;
453 struct scatterlist *s;
454 phys_addr_t physaddr;
455 int ret;
456
Shrenuj Bansala419c792016-10-20 14:05:11 -0700457 _iommu_sync_mmu_pc(true);
458
459 for_each_sg(sg, s, nents, i) {
460 /* Iterate until we find the offset */
461 if (offset_tmp >= s->length) {
462 offset_tmp -= s->length;
463 continue;
464 }
465
466 /* How much mapping is needed in this sg? */
467 if (size < s->length - offset_tmp)
468 size_tmp = size;
469 else
470 size_tmp = s->length - offset_tmp;
471
472 /* Get the phys addr for the offset page */
473 if (offset_tmp != 0) {
474 physaddr = page_to_phys(nth_page(sg_page(s),
475 offset_tmp >> PAGE_SHIFT));
476 /* Reset offset_tmp */
477 offset_tmp = 0;
478 } else
479 physaddr = page_to_phys(sg_page(s));
480
481 /* Do the map for this sg */
482 ret = iommu_map(iommu_pt->domain, addr + mapped,
483 physaddr, size_tmp, flags);
484 if (ret)
485 break;
486
487 mapped += size_tmp;
488 size -= size_tmp;
489
490 if (size == 0)
491 break;
492 }
493
494 _iommu_sync_mmu_pc(false);
495
Shrenuj Bansala419c792016-10-20 14:05:11 -0700496 if (size != 0) {
497 /* Cleanup on error */
Carter Coopera1c7cce2017-12-15 13:29:29 -0700498 _iommu_unmap_sync_pc(pt, addr, mapped);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700499 KGSL_CORE_ERR(
500 "map sg offset err: 0x%016llX, %d, %x, %zd\n",
501 addr, nents, flags, mapped);
502 return -ENODEV;
503 }
504
505 return 0;
506}
507
508static int _iommu_map_sg_sync_pc(struct kgsl_pagetable *pt,
Carter Coopera1c7cce2017-12-15 13:29:29 -0700509 uint64_t addr, struct scatterlist *sg, int nents,
Shrenuj Bansala419c792016-10-20 14:05:11 -0700510 unsigned int flags)
511{
512 struct kgsl_iommu_pt *iommu_pt = pt->priv;
513 size_t mapped;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700514
515 _iommu_sync_mmu_pc(true);
516
517 mapped = iommu_map_sg(iommu_pt->domain, addr, sg, nents, flags);
518
519 _iommu_sync_mmu_pc(false);
520
Shrenuj Bansala419c792016-10-20 14:05:11 -0700521 if (mapped == 0) {
522 KGSL_CORE_ERR("map sg err: 0x%016llX, %d, %x, %zd\n",
523 addr, nents, flags, mapped);
524 return -ENODEV;
525 }
526
527 return 0;
528}
529
530/*
531 * One page allocation for a guard region to protect against over-zealous
532 * GPU pre-fetch
533 */
534
535static struct page *kgsl_guard_page;
536static struct kgsl_memdesc kgsl_secure_guard_page_memdesc;
537
538/*
539 * The dummy page is a placeholder/extra page to be used for sparse mappings.
540 * This page will be mapped to all virtual sparse bindings that are not
541 * physically backed.
542 */
543static struct page *kgsl_dummy_page;
544
545/* These functions help find the nearest allocated memory entries on either side
 546 * of a faulting address. If we know the nearby allocations we can
 547 * better determine what we think should have been located in the
548 * faulting region
549 */
550
551/*
552 * A local structure to make it easy to store the interesting bits for the
553 * memory entries on either side of the faulting address
554 */
555
556struct _mem_entry {
557 uint64_t gpuaddr;
558 uint64_t size;
559 uint64_t flags;
560 unsigned int priv;
561 int pending_free;
562 pid_t pid;
563 char name[32];
564};
565
566static void _get_global_entries(uint64_t faultaddr,
567 struct _mem_entry *prev,
568 struct _mem_entry *next)
569{
570 int i;
571 uint64_t prevaddr = 0;
572 struct global_pt_entry *p = NULL;
573
574 uint64_t nextaddr = (uint64_t) -1;
575 struct global_pt_entry *n = NULL;
576
577 for (i = 0; i < global_pt_count; i++) {
578 uint64_t addr;
579
580 if (global_pt_entries[i].memdesc == NULL)
581 continue;
582
583 addr = global_pt_entries[i].memdesc->gpuaddr;
584 if ((addr < faultaddr) && (addr > prevaddr)) {
585 prevaddr = addr;
586 p = &global_pt_entries[i];
587 }
588
589 if ((addr > faultaddr) && (addr < nextaddr)) {
590 nextaddr = addr;
591 n = &global_pt_entries[i];
592 }
593 }
594
595 if (p != NULL) {
596 prev->gpuaddr = p->memdesc->gpuaddr;
597 prev->size = p->memdesc->size;
598 prev->flags = p->memdesc->flags;
599 prev->priv = p->memdesc->priv;
600 prev->pid = 0;
601 strlcpy(prev->name, p->name, sizeof(prev->name));
602 }
603
604 if (n != NULL) {
605 next->gpuaddr = n->memdesc->gpuaddr;
606 next->size = n->memdesc->size;
607 next->flags = n->memdesc->flags;
608 next->priv = n->memdesc->priv;
609 next->pid = 0;
610 strlcpy(next->name, n->name, sizeof(next->name));
611 }
612}
613
614void __kgsl_get_memory_usage(struct _mem_entry *entry)
615{
616 kgsl_get_memory_usage(entry->name, sizeof(entry->name), entry->flags);
617}
618
619static void _get_entries(struct kgsl_process_private *private,
620 uint64_t faultaddr, struct _mem_entry *prev,
621 struct _mem_entry *next)
622{
623 int id;
624 struct kgsl_mem_entry *entry;
625
626 uint64_t prevaddr = 0;
627 struct kgsl_mem_entry *p = NULL;
628
629 uint64_t nextaddr = (uint64_t) -1;
630 struct kgsl_mem_entry *n = NULL;
631
632 idr_for_each_entry(&private->mem_idr, entry, id) {
633 uint64_t addr = entry->memdesc.gpuaddr;
634
635 if ((addr < faultaddr) && (addr > prevaddr)) {
636 prevaddr = addr;
637 p = entry;
638 }
639
640 if ((addr > faultaddr) && (addr < nextaddr)) {
641 nextaddr = addr;
642 n = entry;
643 }
644 }
645
646 if (p != NULL) {
647 prev->gpuaddr = p->memdesc.gpuaddr;
648 prev->size = p->memdesc.size;
649 prev->flags = p->memdesc.flags;
650 prev->priv = p->memdesc.priv;
651 prev->pending_free = p->pending_free;
Archana Srirama9c943d2020-10-18 23:34:04 +0530652 prev->pid = pid_nr(private->pid);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700653 __kgsl_get_memory_usage(prev);
654 }
655
656 if (n != NULL) {
657 next->gpuaddr = n->memdesc.gpuaddr;
658 next->size = n->memdesc.size;
659 next->flags = n->memdesc.flags;
660 next->priv = n->memdesc.priv;
661 next->pending_free = n->pending_free;
Archana Srirama9c943d2020-10-18 23:34:04 +0530662 next->pid = pid_nr(private->pid);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700663 __kgsl_get_memory_usage(next);
664 }
665}
666
667static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
668 struct _mem_entry *preventry, struct _mem_entry *nextentry,
669 struct kgsl_context *context)
670{
671 struct kgsl_process_private *private;
672
673 memset(preventry, 0, sizeof(*preventry));
674 memset(nextentry, 0, sizeof(*nextentry));
675
676 /* Set the maximum possible size as an initial value */
677 nextentry->gpuaddr = (uint64_t) -1;
678
Deepak Kumar756d6a92017-11-28 16:58:29 +0530679 if (ADDR_IN_GLOBAL(mmu, faultaddr)) {
Shrenuj Bansala419c792016-10-20 14:05:11 -0700680 _get_global_entries(faultaddr, preventry, nextentry);
681 } else if (context) {
682 private = context->proc_priv;
683 spin_lock(&private->mem_lock);
684 _get_entries(private, faultaddr, preventry, nextentry);
685 spin_unlock(&private->mem_lock);
686 }
687}
688
689static void _print_entry(struct kgsl_device *device, struct _mem_entry *entry)
690{
691 KGSL_LOG_DUMP(device,
692 "[%016llX - %016llX] %s %s (pid = %d) (%s)\n",
693 entry->gpuaddr,
694 entry->gpuaddr + entry->size,
695 entry->priv & KGSL_MEMDESC_GUARD_PAGE ? "(+guard)" : "",
696 entry->pending_free ? "(pending free)" : "",
697 entry->pid, entry->name);
698}
699
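/*
 * Look up the faulting address in the memfree log and, if it was recently
 * freed, dump the old mapping so premature frees are easy to spot.
 */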
700static void _check_if_freed(struct kgsl_iommu_context *ctx,
701 uint64_t addr, pid_t ptname)
702{
703 uint64_t gpuaddr = addr;
704 uint64_t size = 0;
705 uint64_t flags = 0;
706 pid_t pid;
707
708 char name[32];
709
710 memset(name, 0, sizeof(name));
711
712 if (kgsl_memfree_find_entry(ptname, &gpuaddr, &size, &flags, &pid)) {
713 kgsl_get_memory_usage(name, sizeof(name) - 1, flags);
714 KGSL_LOG_DUMP(ctx->kgsldev, "---- premature free ----\n");
715 KGSL_LOG_DUMP(ctx->kgsldev,
716 "[%8.8llX-%8.8llX] (%s) was already freed by pid %d\n",
717 gpuaddr, gpuaddr + size, name, pid);
718 }
719}
720
721static bool
722kgsl_iommu_uche_overfetch(struct kgsl_process_private *private,
723 uint64_t faultaddr)
724{
725 int id;
726 struct kgsl_mem_entry *entry = NULL;
727
728 spin_lock(&private->mem_lock);
729 idr_for_each_entry(&private->mem_idr, entry, id) {
730 struct kgsl_memdesc *m = &entry->memdesc;
731
732 if ((faultaddr >= (m->gpuaddr + m->size))
733 && (faultaddr < (m->gpuaddr + m->size + 64))) {
734 spin_unlock(&private->mem_lock);
735 return true;
736 }
737 }
738 spin_unlock(&private->mem_lock);
739 return false;
740}
741
742/*
743 * Read pagefaults where the faulting address lies within the first 64 bytes
744 * of a page (UCHE line size is 64 bytes) and the fault page is preceded by a
745 * valid allocation are considered likely due to UCHE overfetch and suppressed.
746 */
747
748static bool kgsl_iommu_suppress_pagefault(uint64_t faultaddr, int write,
749 struct kgsl_context *context)
750{
751 /*
752 * If there is no context associated with the pagefault then this
753 * could be a fault on a global buffer. We do not suppress faults
754 * on global buffers as they are mainly accessed by the CP bypassing
755 * the UCHE. Also, write pagefaults are never suppressed.
756 */
757 if (!context || write)
758 return false;
759
760 return kgsl_iommu_uche_overfetch(context->proc_priv, faultaddr);
761}
762
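/*
 * Pagefault handler registered via iommu_set_fault_handler(). It logs the
 * faulting context, TTBR0/CONTEXTIDR and nearby allocations and, when the
 * GPUHALT fault policy is set and the transaction is stalled, disables the
 * context fault interrupt and returns -EBUSY so the GPU stays stalled for
 * snapshot and recovery.
 */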
763static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
764 struct device *dev, unsigned long addr, int flags, void *token)
765{
766 int ret = 0;
767 struct kgsl_pagetable *pt = token;
768 struct kgsl_mmu *mmu = pt->mmu;
769 struct kgsl_iommu *iommu;
770 struct kgsl_iommu_context *ctx;
771 u64 ptbase;
772 u32 contextidr;
Lynus Vaze0a01312017-11-08 19:39:31 +0530773 pid_t pid = 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700774 pid_t ptname;
775 struct _mem_entry prev, next;
776 int write;
777 struct kgsl_device *device;
778 struct adreno_device *adreno_dev;
Lynus Vaz1fde74d2017-03-20 18:02:47 +0530779 struct adreno_gpudev *gpudev;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700780 unsigned int no_page_fault_log = 0;
781 unsigned int curr_context_id = 0;
782 struct kgsl_context *context;
783 char *fault_type = "unknown";
784
785 static DEFINE_RATELIMIT_STATE(_rs,
786 DEFAULT_RATELIMIT_INTERVAL,
787 DEFAULT_RATELIMIT_BURST);
788
789 if (mmu == NULL)
790 return ret;
791
792 iommu = _IOMMU_PRIV(mmu);
793 ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
794 device = KGSL_MMU_DEVICE(mmu);
795 adreno_dev = ADRENO_DEVICE(device);
Lynus Vaz1fde74d2017-03-20 18:02:47 +0530796 gpudev = ADRENO_GPU_DEVICE(adreno_dev);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700797
798 if (pt->name == KGSL_MMU_SECURE_PT)
799 ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
800
801 /*
 802	 * Set the fault bits before any printks so that if the fault
 803	 * handler runs it will know it is dealing with a pagefault.
 804	 * Read the global current timestamp because we could be in the middle
 805	 * of an RB switch, so the current RB may not be reliable but the
 806	 * global one always is.
807 */
808 kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
809 KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
810
811 context = kgsl_context_get(device, curr_context_id);
812
813 write = (flags & IOMMU_FAULT_WRITE) ? 1 : 0;
814 if (flags & IOMMU_FAULT_TRANSLATION)
815 fault_type = "translation";
816 else if (flags & IOMMU_FAULT_PERMISSION)
817 fault_type = "permission";
Deepak Kumar8267e992018-04-26 11:16:55 +0530818 else if (flags & IOMMU_FAULT_EXTERNAL)
819 fault_type = "external";
820 else if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
821 fault_type = "transaction stalled";
Shrenuj Bansala419c792016-10-20 14:05:11 -0700822
823 if (kgsl_iommu_suppress_pagefault(addr, write, context)) {
824 iommu->pagefault_suppression_count++;
825 kgsl_context_put(context);
826 return ret;
827 }
828
829 if (context != NULL) {
830 /* save pagefault timestamp for GFT */
831 set_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, &context->priv);
Archana Srirama9c943d2020-10-18 23:34:04 +0530832 pid = pid_nr(context->proc_priv->pid);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700833 }
834
835 ctx->fault = 1;
836
837 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
838 &adreno_dev->ft_pf_policy) &&
839 (flags & IOMMU_FAULT_TRANSACTION_STALLED)) {
840 /*
841 * Turn off GPU IRQ so we don't get faults from it too.
842 * The device mutex must be held to change power state
843 */
844 mutex_lock(&device->mutex);
845 kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
846 mutex_unlock(&device->mutex);
847 }
848
849 ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
850 contextidr = KGSL_IOMMU_GET_CTX_REG(ctx, CONTEXTIDR);
851
852 ptname = MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ?
Lynus Vaze0a01312017-11-08 19:39:31 +0530853 KGSL_MMU_GLOBAL_PT : pid;
Sunil Khatri86e95682017-01-23 17:10:32 +0530854 /*
855 * Trace needs to be logged before searching the faulting
856 * address in free list as it takes quite long time in
857 * search and delays the trace unnecessarily.
858 */
859 trace_kgsl_mmu_pagefault(ctx->kgsldev, addr,
860 ptname, write ? "write" : "read");
Shrenuj Bansala419c792016-10-20 14:05:11 -0700861
862 if (test_bit(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE,
863 &adreno_dev->ft_pf_policy))
864 no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
865
866 if (!no_page_fault_log && __ratelimit(&_rs)) {
Rajesh Kemisettic05883a2018-09-17 11:34:08 +0530867 const char *api_str;
868
869 if (context != NULL) {
870 struct adreno_context *drawctxt =
871 ADRENO_CONTEXT(context);
872
873 api_str = get_api_type_str(drawctxt->type);
874 } else
875 api_str = "UNKNOWN";
876
Shrenuj Bansala419c792016-10-20 14:05:11 -0700877 KGSL_MEM_CRIT(ctx->kgsldev,
878 "GPU PAGE FAULT: addr = %lX pid= %d\n", addr, ptname);
879 KGSL_MEM_CRIT(ctx->kgsldev,
Rajesh Kemisettic05883a2018-09-17 11:34:08 +0530880 "context=%s ctx_type=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
881 ctx->name, api_str, ptbase, contextidr,
Shrenuj Bansala419c792016-10-20 14:05:11 -0700882 write ? "write" : "read", fault_type);
883
Lynus Vaz1fde74d2017-03-20 18:02:47 +0530884 if (gpudev->iommu_fault_block) {
885 unsigned int fsynr1;
886
887 fsynr1 = KGSL_IOMMU_GET_CTX_REG(ctx, FSYNR1);
888 KGSL_MEM_CRIT(ctx->kgsldev,
889 "FAULTING BLOCK: %s\n",
890 gpudev->iommu_fault_block(adreno_dev,
891 fsynr1));
892 }
893
Shrenuj Bansala419c792016-10-20 14:05:11 -0700894 /* Don't print the debug if this is a permissions fault */
895 if (!(flags & IOMMU_FAULT_PERMISSION)) {
896 _check_if_freed(ctx, addr, ptname);
897
898 KGSL_LOG_DUMP(ctx->kgsldev,
899 "---- nearby memory ----\n");
900
901 _find_mem_entries(mmu, addr, &prev, &next, context);
902 if (prev.gpuaddr)
903 _print_entry(ctx->kgsldev, &prev);
904 else
905 KGSL_LOG_DUMP(ctx->kgsldev, "*EMPTY*\n");
906
907 KGSL_LOG_DUMP(ctx->kgsldev, " <- fault @ %8.8lX\n",
908 addr);
909
910 if (next.gpuaddr != (uint64_t) -1)
911 _print_entry(ctx->kgsldev, &next);
912 else
913 KGSL_LOG_DUMP(ctx->kgsldev, "*EMPTY*\n");
914 }
915 }
916
Shrenuj Bansala419c792016-10-20 14:05:11 -0700917
918 /*
919 * We do not want the h/w to resume fetching data from an iommu
920 * that has faulted, this is better for debugging as it will stall
921 * the GPU and trigger a snapshot. Return EBUSY error.
922 */
923 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
924 &adreno_dev->ft_pf_policy) &&
925 (flags & IOMMU_FAULT_TRANSACTION_STALLED)) {
926 uint32_t sctlr_val;
927
928 ret = -EBUSY;
929 /*
930 * Disable context fault interrupts
931 * as we do not clear FSR in the ISR.
932 * Will be re-enabled after FSR is cleared.
933 */
934 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
935 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
936 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
937
938 adreno_set_gpu_fault(adreno_dev, ADRENO_IOMMU_PAGE_FAULT);
939 /* Go ahead with recovery*/
940 adreno_dispatcher_schedule(device);
941 }
942
943 kgsl_context_put(context);
944 return ret;
945}
946
947/*
948 * kgsl_iommu_disable_clk() - Disable iommu clocks
949 * Disable IOMMU clocks
950 */
951static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
952{
953 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
954 int j;
955
956 atomic_dec(&iommu->clk_enable_count);
957
958 /*
959 * Make sure the clk refcounts are good. An unbalance may
960 * cause the clocks to be off when we need them on.
961 */
962 WARN_ON(atomic_read(&iommu->clk_enable_count) < 0);
963
964 for (j = (KGSL_IOMMU_MAX_CLKS - 1); j >= 0; j--)
965 if (iommu->clks[j])
966 clk_disable_unprepare(iommu->clks[j]);
967}
968
969/*
 970 * kgsl_iommu_clk_prepare_enable - Enable the specified IOMMU clock
971 * Try 4 times to enable it and then BUG() for debug
972 */
973static void kgsl_iommu_clk_prepare_enable(struct clk *clk)
974{
975 int num_retries = 4;
976
977 while (num_retries--) {
978 if (!clk_prepare_enable(clk))
979 return;
980 }
981
982 /* Failure is fatal so BUG() to facilitate debug */
983 KGSL_CORE_ERR("IOMMU clock enable failed\n");
984 BUG();
985}
986
987/*
988 * kgsl_iommu_enable_clk - Enable iommu clocks
989 * Enable all the IOMMU clocks
990 */
991static void kgsl_iommu_enable_clk(struct kgsl_mmu *mmu)
992{
993 int j;
994 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
995
996 for (j = 0; j < KGSL_IOMMU_MAX_CLKS; j++) {
997 if (iommu->clks[j])
998 kgsl_iommu_clk_prepare_enable(iommu->clks[j]);
999 }
1000 atomic_inc(&iommu->clk_enable_count);
1001}
1002
1003/* kgsl_iommu_get_ttbr0 - Get TTBR0 setting for a pagetable */
1004static u64 kgsl_iommu_get_ttbr0(struct kgsl_pagetable *pt)
1005{
1006 struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
1007
1008 BUG_ON(iommu_pt == NULL);
1009
1010 return iommu_pt->ttbr0;
1011}
1012
1013static bool kgsl_iommu_pt_equal(struct kgsl_mmu *mmu,
1014 struct kgsl_pagetable *pt,
1015 u64 ttbr0)
1016{
1017 struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
1018 u64 domain_ttbr0;
1019
1020 if (iommu_pt == NULL)
1021 return 0;
1022
1023 domain_ttbr0 = kgsl_iommu_get_ttbr0(pt);
1024
1025 return (domain_ttbr0 == ttbr0);
1026}
1027
1028/* kgsl_iommu_get_contextidr - query CONTEXTIDR setting for a pagetable */
1029static u32 kgsl_iommu_get_contextidr(struct kgsl_pagetable *pt)
1030{
1031 struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
1032
1033 BUG_ON(iommu_pt == NULL);
1034
1035 return iommu_pt->contextidr;
1036}
1037
1038/*
 1039 * kgsl_iommu_destroy_pagetable - Free up resources held by a pagetable
 1040 * @pt - Pointer to the pagetable which is to be freed
1041 *
1042 * Return - void
1043 */
1044static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)
1045{
1046 struct kgsl_iommu_pt *iommu_pt = pt->priv;
1047 struct kgsl_mmu *mmu = pt->mmu;
1048 struct kgsl_iommu *iommu;
1049 struct kgsl_iommu_context *ctx;
1050
1051 /*
1052 * Make sure all allocations are unmapped before destroying
1053 * the pagetable
1054 */
1055 WARN_ON(!list_empty(&pt->list));
1056
1057 iommu = _IOMMU_PRIV(mmu);
1058
1059 if (pt->name == KGSL_MMU_SECURE_PT) {
1060 ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
Shrenuj Bansala419c792016-10-20 14:05:11 -07001061 } else {
1062 ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1063 kgsl_iommu_unmap_globals(pt);
1064 }
1065
1066 if (iommu_pt->domain) {
1067 trace_kgsl_pagetable_destroy(iommu_pt->ttbr0, pt->name);
1068
1069 _detach_pt(iommu_pt, ctx);
1070
1071 iommu_domain_free(iommu_pt->domain);
1072 }
1073
1074 kfree(iommu_pt);
1075}
1076
1077static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
1078 struct kgsl_pagetable *pagetable,
1079 struct kgsl_iommu_pt *pt)
1080{
Shrenuj Bansala419c792016-10-20 14:05:11 -07001081 if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
Deepak Kumar756d6a92017-11-28 16:58:29 +05301082 pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu);
1083 pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
1084 pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
1085 pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001086 } else {
1087 pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
Deepak Kumar756d6a92017-11-28 16:58:29 +05301088 pt->compat_va_end = KGSL_IOMMU_SECURE_BASE(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001089 pt->va_start = KGSL_IOMMU_VA_BASE64;
1090 pt->va_end = KGSL_IOMMU_VA_END64;
1091 }
1092
1093 if (pagetable->name != KGSL_MMU_GLOBAL_PT &&
1094 pagetable->name != KGSL_MMU_SECURE_PT) {
Deepak Kumarcf056d12018-04-17 15:59:42 +05301095 if (kgsl_is_compat_task()) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07001096 pt->svm_start = KGSL_IOMMU_SVM_BASE32;
Deepak Kumar756d6a92017-11-28 16:58:29 +05301097 pt->svm_end = KGSL_IOMMU_SECURE_BASE(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001098 } else {
1099 pt->svm_start = KGSL_IOMMU_SVM_BASE64;
1100 pt->svm_end = KGSL_IOMMU_SVM_END64;
1101 }
1102 }
1103}
1104
1105static void setup_32bit_pagetable(struct kgsl_mmu *mmu,
1106 struct kgsl_pagetable *pagetable,
1107 struct kgsl_iommu_pt *pt)
1108{
Shrenuj Bansala419c792016-10-20 14:05:11 -07001109 if (mmu->secured) {
1110 if (pagetable->name == KGSL_MMU_SECURE_PT) {
Deepak Kumar756d6a92017-11-28 16:58:29 +05301111 pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu);
1112 pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
1113 pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
1114 pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001115 } else {
1116 pt->va_start = KGSL_IOMMU_SVM_BASE32;
Deepak Kumar756d6a92017-11-28 16:58:29 +05301117 pt->va_end = KGSL_IOMMU_SECURE_BASE(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001118 pt->compat_va_start = pt->va_start;
1119 pt->compat_va_end = pt->va_end;
1120 }
1121 } else {
1122 pt->va_start = KGSL_IOMMU_SVM_BASE32;
Deepak Kumar756d6a92017-11-28 16:58:29 +05301123 pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001124 pt->compat_va_start = pt->va_start;
1125 pt->compat_va_end = pt->va_end;
1126 }
1127
1128 if (pagetable->name != KGSL_MMU_GLOBAL_PT &&
1129 pagetable->name != KGSL_MMU_SECURE_PT) {
1130 pt->svm_start = KGSL_IOMMU_SVM_BASE32;
1131 pt->svm_end = KGSL_IOMMU_SVM_END32;
1132 }
1133}
1134
1135
1136static struct kgsl_iommu_pt *
1137_alloc_pt(struct device *dev, struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1138{
1139 struct kgsl_iommu_pt *iommu_pt;
1140 struct bus_type *bus = kgsl_mmu_get_bus(dev);
1141
1142 if (bus == NULL)
1143 return ERR_PTR(-ENODEV);
1144
1145 iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
1146 if (iommu_pt == NULL)
1147 return ERR_PTR(-ENOMEM);
1148
1149 iommu_pt->domain = iommu_domain_alloc(bus);
1150 if (iommu_pt->domain == NULL) {
1151 kfree(iommu_pt);
1152 return ERR_PTR(-ENODEV);
1153 }
1154
1155 pt->pt_ops = &iommu_pt_ops;
1156 pt->priv = iommu_pt;
1157 pt->fault_addr = ~0ULL;
1158 iommu_pt->rbtree = RB_ROOT;
1159
1160 if (MMU_FEATURE(mmu, KGSL_MMU_64BIT))
1161 setup_64bit_pagetable(mmu, pt, iommu_pt);
1162 else
1163 setup_32bit_pagetable(mmu, pt, iommu_pt);
1164
1165
1166 return iommu_pt;
1167}
1168
1169static void _free_pt(struct kgsl_iommu_context *ctx, struct kgsl_pagetable *pt)
1170{
1171 struct kgsl_iommu_pt *iommu_pt = pt->priv;
1172
1173 pt->pt_ops = NULL;
1174 pt->priv = NULL;
1175
1176 if (iommu_pt == NULL)
1177 return;
1178
1179 _detach_pt(iommu_pt, ctx);
1180
1181 if (iommu_pt->domain != NULL)
1182 iommu_domain_free(iommu_pt->domain);
1183 kfree(iommu_pt);
1184}
1185
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07001186void _enable_gpuhtw_llc(struct kgsl_mmu *mmu, struct kgsl_iommu_pt *iommu_pt)
1187{
1188 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
1189 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1190 int gpuhtw_llc_enable = 1;
1191 int ret;
1192
1193 /* GPU pagetable walk LLC slice not enabled */
1194 if (!adreno_dev->gpuhtw_llc_slice)
1195 return;
1196
1197 /* Domain attribute to enable system cache for GPU pagetable walks */
1198 ret = iommu_domain_set_attr(iommu_pt->domain,
1199 DOMAIN_ATTR_USE_UPSTREAM_HINT, &gpuhtw_llc_enable);
1200 /*
1201 * Warn that the system cache will not be used for GPU
1202 * pagetable walks. This is not a fatal error.
1203 */
1204 WARN_ONCE(ret,
1205 "System cache not enabled for GPU pagetable walks: %d\n", ret);
1206}
1207
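/*
 * Issue the CP_SMMU_APERTURE_ID SCM call to program the SMMU aperture for
 * the given context bank. Called from _init_global_pt() only when
 * per-process pagetables are in use and the SCM call is available.
 */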
Shrenuj Bansal9a0563b2017-06-15 14:45:15 -07001208static int program_smmu_aperture(unsigned int cb, unsigned int aperture_reg)
1209{
1210 struct scm_desc desc = {0};
1211
1212 desc.args[0] = 0xFFFF0000 | ((aperture_reg & 0xff) << 8) | (cb & 0xff);
1213 desc.args[1] = 0xFFFFFFFF;
1214 desc.args[2] = 0xFFFFFFFF;
1215 desc.args[3] = 0xFFFFFFFF;
1216 desc.arginfo = SCM_ARGS(4);
1217
Sunil Khatri82eb1ec2018-01-09 15:28:14 +05301218 return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, CP_SMMU_APERTURE_ID), &desc);
Shrenuj Bansal9a0563b2017-06-15 14:45:15 -07001219}
1220
Shrenuj Bansala419c792016-10-20 14:05:11 -07001221static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1222{
1223 int ret = 0;
1224 struct kgsl_iommu_pt *iommu_pt = NULL;
1225 unsigned int cb_num;
1226 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1227 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1228
1229 iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
1230
1231 if (IS_ERR(iommu_pt))
1232 return PTR_ERR(iommu_pt);
1233
1234 if (kgsl_mmu_is_perprocess(mmu)) {
1235 ret = iommu_domain_set_attr(iommu_pt->domain,
1236 DOMAIN_ATTR_PROCID, &pt->name);
1237 if (ret) {
1238 KGSL_CORE_ERR("set DOMAIN_ATTR_PROCID failed: %d\n",
1239 ret);
1240 goto done;
1241 }
1242 }
1243
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07001244 _enable_gpuhtw_llc(mmu, iommu_pt);
1245
Shrenuj Bansala419c792016-10-20 14:05:11 -07001246 ret = _attach_pt(iommu_pt, ctx);
1247 if (ret)
1248 goto done;
1249
1250 iommu_set_fault_handler(iommu_pt->domain,
1251 kgsl_iommu_fault_handler, pt);
1252
1253 ret = iommu_domain_get_attr(iommu_pt->domain,
1254 DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
1255 if (ret) {
Shrenuj Bansalc3b15ce2017-06-15 14:48:05 -07001256 KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXT_BANK failed: %d\n",
Shrenuj Bansala419c792016-10-20 14:05:11 -07001257 ret);
1258 goto done;
1259 }
1260
Sunil Khatri82eb1ec2018-01-09 15:28:14 +05301261 if (!MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) &&
1262 scm_is_call_available(SCM_SVC_MP, CP_SMMU_APERTURE_ID)) {
Shrenuj Bansal9a0563b2017-06-15 14:45:15 -07001263 ret = program_smmu_aperture(cb_num, CP_APERTURE_REG);
1264 if (ret) {
1265 pr_err("SMMU aperture programming call failed with error %d\n",
1266 ret);
1267 return ret;
1268 }
1269 }
1270
Shrenuj Bansala419c792016-10-20 14:05:11 -07001271 ctx->cb_num = cb_num;
1272 ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
1273 + (cb_num << KGSL_IOMMU_CB_SHIFT);
1274
1275 ret = iommu_domain_get_attr(iommu_pt->domain,
1276 DOMAIN_ATTR_TTBR0, &iommu_pt->ttbr0);
1277 if (ret) {
1278 KGSL_CORE_ERR("get DOMAIN_ATTR_TTBR0 failed: %d\n",
1279 ret);
1280 goto done;
1281 }
1282 ret = iommu_domain_get_attr(iommu_pt->domain,
1283 DOMAIN_ATTR_CONTEXTIDR, &iommu_pt->contextidr);
1284 if (ret) {
1285 KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXTIDR failed: %d\n",
1286 ret);
1287 goto done;
1288 }
1289
1290 ret = kgsl_iommu_map_globals(pt);
1291
1292done:
1293 if (ret)
1294 _free_pt(ctx, pt);
1295
1296 return ret;
1297}
1298
1299static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1300{
1301 int ret = 0;
1302 struct kgsl_iommu_pt *iommu_pt = NULL;
1303 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1304 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
1305 int secure_vmid = VMID_CP_PIXEL;
1306 unsigned int cb_num;
1307
1308 if (!mmu->secured)
1309 return -EPERM;
1310
1311 if (!MMU_FEATURE(mmu, KGSL_MMU_HYP_SECURE_ALLOC)) {
1312 if (!kgsl_mmu_bus_secured(ctx->dev))
1313 return -EPERM;
1314 }
1315
1316 iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
1317
1318 if (IS_ERR(iommu_pt))
1319 return PTR_ERR(iommu_pt);
1320
1321 ret = iommu_domain_set_attr(iommu_pt->domain,
1322 DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
1323 if (ret) {
1324 KGSL_CORE_ERR("set DOMAIN_ATTR_SECURE_VMID failed: %d\n", ret);
1325 goto done;
1326 }
1327
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07001328 _enable_gpuhtw_llc(mmu, iommu_pt);
1329
Shrenuj Bansala419c792016-10-20 14:05:11 -07001330 ret = _attach_pt(iommu_pt, ctx);
1331
1332 if (MMU_FEATURE(mmu, KGSL_MMU_HYP_SECURE_ALLOC))
1333 iommu_set_fault_handler(iommu_pt->domain,
1334 kgsl_iommu_fault_handler, pt);
1335
1336 ret = iommu_domain_get_attr(iommu_pt->domain,
1337 DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
1338 if (ret) {
 1339		KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXT_BANK failed: %d\n",
1340 ret);
1341 goto done;
1342 }
1343
1344 ctx->cb_num = cb_num;
1345 ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
1346 + (cb_num << KGSL_IOMMU_CB_SHIFT);
1347
Shrenuj Bansala419c792016-10-20 14:05:11 -07001348done:
1349 if (ret)
1350 _free_pt(ctx, pt);
1351 return ret;
1352}
1353
1354static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1355{
1356 int ret = 0;
1357 struct kgsl_iommu_pt *iommu_pt = NULL;
1358 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1359 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1360 int dynamic = 1;
1361 unsigned int cb_num = ctx->cb_num;
1362
1363 iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
1364
1365 if (IS_ERR(iommu_pt))
1366 return PTR_ERR(iommu_pt);
1367
1368 ret = iommu_domain_set_attr(iommu_pt->domain,
1369 DOMAIN_ATTR_DYNAMIC, &dynamic);
1370 if (ret) {
1371 KGSL_CORE_ERR("set DOMAIN_ATTR_DYNAMIC failed: %d\n", ret);
1372 goto done;
1373 }
1374 ret = iommu_domain_set_attr(iommu_pt->domain,
1375 DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
1376 if (ret) {
1377 KGSL_CORE_ERR("set DOMAIN_ATTR_CONTEXT_BANK failed: %d\n", ret);
1378 goto done;
1379 }
1380
1381 ret = iommu_domain_set_attr(iommu_pt->domain,
1382 DOMAIN_ATTR_PROCID, &pt->name);
1383 if (ret) {
1384 KGSL_CORE_ERR("set DOMAIN_ATTR_PROCID failed: %d\n", ret);
1385 goto done;
1386 }
1387
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07001388 _enable_gpuhtw_llc(mmu, iommu_pt);
1389
Shrenuj Bansala419c792016-10-20 14:05:11 -07001390 ret = _attach_pt(iommu_pt, ctx);
1391 if (ret)
1392 goto done;
1393
1394 /* now read back the attributes needed for self programming */
1395 ret = iommu_domain_get_attr(iommu_pt->domain,
1396 DOMAIN_ATTR_TTBR0, &iommu_pt->ttbr0);
1397 if (ret) {
1398 KGSL_CORE_ERR("get DOMAIN_ATTR_TTBR0 failed: %d\n", ret);
1399 goto done;
1400 }
1401
1402 ret = iommu_domain_get_attr(iommu_pt->domain,
1403 DOMAIN_ATTR_CONTEXTIDR, &iommu_pt->contextidr);
1404 if (ret) {
1405 KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXTIDR failed: %d\n", ret);
1406 goto done;
1407 }
1408
1409 ret = kgsl_iommu_map_globals(pt);
1410
1411done:
1412 if (ret)
1413 _free_pt(ctx, pt);
1414
1415 return ret;
1416}
1417
1418/* kgsl_iommu_init_pt - Set up an IOMMU pagetable */
1419static int kgsl_iommu_init_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1420{
1421 if (pt == NULL)
1422 return -EINVAL;
1423
1424 switch (pt->name) {
1425 case KGSL_MMU_GLOBAL_PT:
1426 return _init_global_pt(mmu, pt);
1427
1428 case KGSL_MMU_SECURE_PT:
1429 return _init_secure_pt(mmu, pt);
1430
1431 default:
1432 return _init_per_process_pt(mmu, pt);
1433 }
1434}
1435
1436static struct kgsl_pagetable *kgsl_iommu_getpagetable(struct kgsl_mmu *mmu,
1437 unsigned long name)
1438{
1439 struct kgsl_pagetable *pt;
1440
1441 if (!kgsl_mmu_is_perprocess(mmu) && (name != KGSL_MMU_SECURE_PT)) {
1442 name = KGSL_MMU_GLOBAL_PT;
1443 if (mmu->defaultpagetable != NULL)
1444 return mmu->defaultpagetable;
1445 }
1446
1447 pt = kgsl_get_pagetable(name);
1448 if (pt == NULL)
1449 pt = kgsl_mmu_createpagetableobject(mmu, name);
1450
1451 return pt;
1452}
1453
1454/*
 1455 * kgsl_iommu_get_reg_ahbaddr - Return the AHB address of the register
 1456 * @mmu - Pointer to mmu structure
 1457 * @id - The context ID of the IOMMU ctx
 1458 * @reg - The register for which the address is required
 1459 *
 1460 * Return - The address of the register, which can be used in a type0 packet
1461 */
1462static unsigned int kgsl_iommu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
1463 int id, unsigned int reg)
1464{
1465 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1466 struct kgsl_iommu_context *ctx = &iommu->ctx[id];
1467
1468 return ctx->gpu_offset + kgsl_iommu_reg_list[reg];
1469}
1470
1471static void _detach_context(struct kgsl_iommu_context *ctx)
1472{
1473 struct kgsl_iommu_pt *iommu_pt;
1474
1475 if (ctx->default_pt == NULL)
1476 return;
1477
1478 iommu_pt = ctx->default_pt->priv;
1479
1480 _detach_pt(iommu_pt, ctx);
1481
1482 ctx->default_pt = NULL;
1483}
1484
1485static void kgsl_iommu_close(struct kgsl_mmu *mmu)
1486{
1487 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1488 int i;
1489
1490 for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
1491 _detach_context(&iommu->ctx[i]);
1492
1493 kgsl_mmu_putpagetable(mmu->defaultpagetable);
1494 mmu->defaultpagetable = NULL;
1495
1496 kgsl_mmu_putpagetable(mmu->securepagetable);
1497 mmu->securepagetable = NULL;
1498
1499 if (iommu->regbase != NULL)
1500 iounmap(iommu->regbase);
1501
1502 kgsl_sharedmem_free(&kgsl_secure_guard_page_memdesc);
1503
1504 if (kgsl_guard_page != NULL) {
1505 __free_page(kgsl_guard_page);
1506 kgsl_guard_page = NULL;
1507 }
1508
1509 if (kgsl_dummy_page != NULL) {
1510 __free_page(kgsl_dummy_page);
1511 kgsl_dummy_page = NULL;
1512 }
1513
1514 kgsl_iommu_remove_global(mmu, &iommu->setstate);
1515 kgsl_sharedmem_free(&iommu->setstate);
1516 kgsl_cleanup_qdss_desc(mmu);
Jonathan Wicks4892d8d2017-02-24 16:21:26 -07001517 kgsl_cleanup_qtimer_desc(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001518}
1519
1520static int _setstate_alloc(struct kgsl_device *device,
1521 struct kgsl_iommu *iommu)
1522{
1523 int ret;
1524
Lynus Vaz90d98b52018-04-09 14:45:36 +05301525 kgsl_memdesc_init(device, &iommu->setstate, 0);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001526 ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, PAGE_SIZE);
1527
1528 if (!ret) {
1529 /* Mark the setstate memory as read only */
1530 iommu->setstate.flags |= KGSL_MEMFLAGS_GPUREADONLY;
1531
1532 kgsl_sharedmem_set(device, &iommu->setstate, 0, 0, PAGE_SIZE);
1533 }
1534
1535 return ret;
1536}
1537
1538static int kgsl_iommu_init(struct kgsl_mmu *mmu)
1539{
1540 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
1541 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1542 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1543 int status;
1544
1545 mmu->features |= KGSL_MMU_PAGED;
1546
1547 if (ctx->name == NULL) {
1548 KGSL_CORE_ERR("dt: gfx3d0_user context bank not found\n");
1549 return -EINVAL;
1550 }
1551
1552 status = _setstate_alloc(device, iommu);
1553 if (status)
1554 return status;
1555
1556 /* check requirements for per process pagetables */
1557 if (ctx->gpu_offset == UINT_MAX) {
1558 KGSL_CORE_ERR("missing qcom,gpu-offset forces global pt\n");
1559 mmu->features |= KGSL_MMU_GLOBAL_PAGETABLE;
1560 }
1561
1562 if (iommu->version == 1 && iommu->micro_mmu_ctrl == UINT_MAX) {
1563 KGSL_CORE_ERR(
1564 "missing qcom,micro-mmu-control forces global pt\n");
1565 mmu->features |= KGSL_MMU_GLOBAL_PAGETABLE;
1566 }
1567
1568 /* Check to see if we need to do the IOMMU sync dance */
1569 need_iommu_sync = of_property_read_bool(device->pdev->dev.of_node,
1570 "qcom,gpu-quirk-iommu-sync");
1571
1572 iommu->regbase = ioremap(iommu->regstart, iommu->regsize);
1573 if (iommu->regbase == NULL) {
1574 KGSL_CORE_ERR("Could not map IOMMU registers 0x%lx:0x%x\n",
1575 iommu->regstart, iommu->regsize);
1576 status = -ENOMEM;
1577 goto done;
1578 }
1579
1580 if (addr_entry_cache == NULL) {
1581 addr_entry_cache = KMEM_CACHE(kgsl_iommu_addr_entry, 0);
1582 if (addr_entry_cache == NULL) {
1583 status = -ENOMEM;
1584 goto done;
1585 }
1586 }
1587
1588 kgsl_iommu_add_global(mmu, &iommu->setstate, "setstate");
1589 kgsl_setup_qdss_desc(device);
Jonathan Wicks4892d8d2017-02-24 16:21:26 -07001590 kgsl_setup_qtimer_desc(device);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001591
Harshdeep Dhatt1f408332017-03-27 11:35:13 -06001592 if (!mmu->secured)
1593 goto done;
1594
1595 mmu->securepagetable = kgsl_mmu_getpagetable(mmu,
1596 KGSL_MMU_SECURE_PT);
1597 if (IS_ERR(mmu->securepagetable)) {
1598 status = PTR_ERR(mmu->securepagetable);
1599 mmu->securepagetable = NULL;
1600 } else if (mmu->securepagetable == NULL) {
1601 status = -ENOMEM;
1602 }
1603
Shrenuj Bansala419c792016-10-20 14:05:11 -07001604done:
1605 if (status)
1606 kgsl_iommu_close(mmu);
1607
1608 return status;
1609}
1610
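/*
 * Attach the default (global) pagetable to the user context bank and program
 * SCTLR stall/terminate behaviour according to the pagefault policy.
 */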
1611static int _setup_user_context(struct kgsl_mmu *mmu)
1612{
1613 int ret = 0;
1614 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1615 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1616 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
1617 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1618 struct kgsl_iommu_pt *iommu_pt = NULL;
1619 unsigned int sctlr_val;
1620
1621 if (mmu->defaultpagetable == NULL) {
1622 mmu->defaultpagetable = kgsl_mmu_getpagetable(mmu,
1623 KGSL_MMU_GLOBAL_PT);
1624 /* if we don't have a default pagetable, nothing will work */
1625 if (IS_ERR(mmu->defaultpagetable)) {
1626 ret = PTR_ERR(mmu->defaultpagetable);
1627 mmu->defaultpagetable = NULL;
1628 return ret;
Lynus Vaza2e31112017-04-17 18:29:58 +05301629 } else if (mmu->defaultpagetable == NULL) {
1630 return -ENOMEM;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001631 }
1632 }
1633
1634 iommu_pt = mmu->defaultpagetable->priv;
1635 if (iommu_pt == NULL)
1636 return -ENODEV;
1637
1638 ret = _attach_pt(iommu_pt, ctx);
1639 if (ret)
1640 return ret;
1641
1642 ctx->default_pt = mmu->defaultpagetable;
1643
1644 kgsl_iommu_enable_clk(mmu);
1645
1646 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
1647
1648 /*
1649 * If pagefault policy is GPUHALT_ENABLE,
1650 * 1) Program CFCFG to 1 to enable STALL mode
1651 * 2) Program HUPCF to 0 (Stall or terminate subsequent
1652 * transactions in the presence of an outstanding fault)
1653 * else
1654 * 1) Program CFCFG to 0 to disable STALL mode (0=Terminate)
1655 * 2) Program HUPCF to 1 (Process subsequent transactions
1656 * independently of any outstanding fault)
1657 */
1658
1659 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
1660 &adreno_dev->ft_pf_policy)) {
1661 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
1662 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
1663 } else {
1664 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
1665 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
1666 }
1667 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
1668 kgsl_iommu_disable_clk(mmu);
1669
1670 return 0;
1671}
1672
1673static int _setup_secure_context(struct kgsl_mmu *mmu)
1674{
1675 int ret;
1676 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1677 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
1678 unsigned int cb_num;
1679
1680 struct kgsl_iommu_pt *iommu_pt;
1681
1682 if (ctx->dev == NULL || !mmu->secured)
1683 return 0;
1684
Harshdeep Dhatt1f408332017-03-27 11:35:13 -06001685 if (mmu->securepagetable == NULL)
1686 return -ENOMEM;
1687
Shrenuj Bansala419c792016-10-20 14:05:11 -07001688 iommu_pt = mmu->securepagetable->priv;
1689
1690 ret = _attach_pt(iommu_pt, ctx);
1691 if (ret)
1692 goto done;
1693
1694 ctx->default_pt = mmu->securepagetable;
1695
1696 ret = iommu_domain_get_attr(iommu_pt->domain, DOMAIN_ATTR_CONTEXT_BANK,
1697 &cb_num);
1698 if (ret) {
1699 KGSL_CORE_ERR("get CONTEXT_BANK attr, err %d\n", ret);
1700 goto done;
1701 }
1702 ctx->cb_num = cb_num;
1703done:
1704 if (ret)
1705 _detach_context(ctx);
1706 return ret;
1707}
1708
1709static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt);
1710
1711static int kgsl_iommu_start(struct kgsl_mmu *mmu)
1712{
1713 int status;
1714 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1715
1716 status = _setup_user_context(mmu);
1717 if (status)
1718 return status;
1719
1720 status = _setup_secure_context(mmu);
1721 if (status) {
1722 _detach_context(&iommu->ctx[KGSL_IOMMU_CONTEXT_USER]);
1723 return status;
1724 }
1725
1726 /* Make sure the hardware is programmed to the default pagetable */
1727 return kgsl_iommu_set_pt(mmu, mmu->defaultpagetable);
1728}
1729
1730static int
1731kgsl_iommu_unmap_offset(struct kgsl_pagetable *pt,
1732 struct kgsl_memdesc *memdesc, uint64_t addr,
1733 uint64_t offset, uint64_t size)
1734{
1735 if (size == 0 || (size + offset) > kgsl_memdesc_footprint(memdesc))
1736 return -EINVAL;
1737 /*
1738 * All GPU addresses as assigned are page aligned, but some
1739 * functions perturb the gpuaddr with an offset, so apply the
1740 * mask here to make sure we have the right address.
1741 */
1742
1743 addr = PAGE_ALIGN(addr);
1744 if (addr == 0)
1745 return -EINVAL;
1746
Carter Coopera1c7cce2017-12-15 13:29:29 -07001747 return _iommu_unmap_sync_pc(pt, addr + offset, size);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001748}
1749
1750static int
1751kgsl_iommu_unmap(struct kgsl_pagetable *pt, struct kgsl_memdesc *memdesc)
1752{
1753 if (memdesc->size == 0 || memdesc->gpuaddr == 0)
1754 return -EINVAL;
1755
1756 return kgsl_iommu_unmap_offset(pt, memdesc, memdesc->gpuaddr, 0,
1757 kgsl_memdesc_footprint(memdesc));
1758}
1759
1760/**
1761 * _iommu_map_guard_page - Map iommu guard page
1762 * @pt - Pointer to kgsl pagetable structure
1763 * @memdesc - memdesc to add guard page
1764 * @gpuaddr - GPU addr of guard page
1765 * @protflags - flags for mapping
1766 *
1767 * Return 0 on success, error on map fail
1768 */
1769static int _iommu_map_guard_page(struct kgsl_pagetable *pt,
1770 struct kgsl_memdesc *memdesc,
1771 uint64_t gpuaddr,
1772 unsigned int protflags)
1773{
1774 phys_addr_t physaddr;
1775
1776 if (!kgsl_memdesc_has_guard_page(memdesc))
1777 return 0;
1778
1779 /*
1780 * Allocate guard page for secure buffers.
1781	 * This has to be done after we attach an SMMU pagetable.
1782	 * Allocate the guard page when the first secure buffer is
1783	 * mapped, to save 1MB of memory if CPZ is not used.
1784 */
1785 if (kgsl_memdesc_is_secured(memdesc)) {
1786 struct scatterlist *sg;
1787 unsigned int sgp_size = pt->mmu->secure_align_mask + 1;
1788
1789 if (!kgsl_secure_guard_page_memdesc.sgt) {
1790 if (kgsl_allocate_user(KGSL_MMU_DEVICE(pt->mmu),
1791 &kgsl_secure_guard_page_memdesc,
1792 sgp_size, KGSL_MEMFLAGS_SECURE)) {
1793 KGSL_CORE_ERR(
1794 "Secure guard page alloc failed\n");
1795 return -ENOMEM;
1796 }
1797 }
1798
1799 sg = kgsl_secure_guard_page_memdesc.sgt->sgl;
1800 physaddr = page_to_phys(sg_page(sg));
1801 } else {
1802 if (kgsl_guard_page == NULL) {
1803 kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
1804 __GFP_NORETRY | __GFP_HIGHMEM);
1805 if (kgsl_guard_page == NULL)
1806 return -ENOMEM;
1807 }
1808
1809 physaddr = page_to_phys(kgsl_guard_page);
1810 }
1811
Carter Coopera1c7cce2017-12-15 13:29:29 -07001812 return _iommu_map_sync_pc(pt, gpuaddr, physaddr,
Shrenuj Bansala419c792016-10-20 14:05:11 -07001813 kgsl_memdesc_guard_page_size(memdesc),
1814 protflags & ~IOMMU_WRITE);
1815}
1816
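/*
 * _get_protection_flags() - Build the IOMMU protection flags for a memdesc
 * @memdesc: Memory descriptor being mapped
 *
 * Start from a read/write, no-execute mapping with IOMMU_USE_UPSTREAM_HINT,
 * then drop IOMMU_WRITE for read-only buffers, add IOMMU_PRIV for
 * privileged buffers and IOMMU_CACHE for IO-coherent buffers.
 */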
1817static unsigned int _get_protection_flags(struct kgsl_memdesc *memdesc)
1818{
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06001819 unsigned int flags = IOMMU_READ | IOMMU_WRITE |
1820 IOMMU_NOEXEC | IOMMU_USE_UPSTREAM_HINT;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001821
1822 if (memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY)
1823 flags &= ~IOMMU_WRITE;
1824
1825 if (memdesc->priv & KGSL_MEMDESC_PRIVILEGED)
1826 flags |= IOMMU_PRIV;
1827
Shrenuj Bansal4fd6a562017-08-07 15:12:54 -07001828 if (memdesc->flags & KGSL_MEMFLAGS_IOCOHERENT)
1829 flags |= IOMMU_CACHE;
1830
Shrenuj Bansala419c792016-10-20 14:05:11 -07001831 return flags;
1832}
1833
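/*
 * kgsl_iommu_map() - Map a memdesc (and its guard page, if any) into the
 * given pagetable at memdesc->gpuaddr
 * @pt: Pointer to the kgsl pagetable structure
 * @memdesc: Memory descriptor to map
 *
 * If mapping the guard page fails, the main mapping is torn down again.
 */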
1834static int
1835kgsl_iommu_map(struct kgsl_pagetable *pt,
1836 struct kgsl_memdesc *memdesc)
1837{
1838 int ret;
1839 uint64_t addr = memdesc->gpuaddr;
1840 uint64_t size = memdesc->size;
1841 unsigned int flags = _get_protection_flags(memdesc);
1842 struct sg_table *sgt = NULL;
1843
1844 /*
1845 * For paged memory allocated through kgsl, memdesc->pages is not NULL.
1846 * Allocate sgt here just for its map operation. Contiguous memory
1847 * already has its sgt, so no need to allocate it here.
1848 */
1849 if (memdesc->pages != NULL)
1850 sgt = kgsl_alloc_sgt_from_pages(memdesc);
1851 else
1852 sgt = memdesc->sgt;
1853
1854 if (IS_ERR(sgt))
1855 return PTR_ERR(sgt);
1856
Carter Coopera1c7cce2017-12-15 13:29:29 -07001857 ret = _iommu_map_sg_sync_pc(pt, addr, sgt->sgl, sgt->nents, flags);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001858 if (ret)
1859 goto done;
1860
1861 ret = _iommu_map_guard_page(pt, memdesc, addr + size, flags);
1862 if (ret)
Carter Coopera1c7cce2017-12-15 13:29:29 -07001863 _iommu_unmap_sync_pc(pt, addr, size);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001864
1865done:
1866 if (memdesc->pages != NULL)
1867 kgsl_free_sgt(sgt);
1868
1869 return ret;
1870}
1871
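/*
 * kgsl_iommu_sparse_dummy_map() - Map a range of a sparse memdesc to the
 * single shared dummy page
 * @pt: Pointer to the kgsl pagetable structure
 * @memdesc: Sparse memory descriptor
 * @offset: Offset into the memdesc at which to start mapping
 * @size: Size of the range to map
 *
 * Every page in the range is backed by kgsl_dummy_page and mapped
 * read-only and no-execute.
 */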
1872static int kgsl_iommu_sparse_dummy_map(struct kgsl_pagetable *pt,
1873 struct kgsl_memdesc *memdesc, uint64_t offset, uint64_t size)
1874{
1875 int ret = 0, i;
1876 struct page **pages = NULL;
1877 struct sg_table sgt;
1878 int count = size >> PAGE_SHIFT;
1879
1880 /* verify the offset is within our range */
1881 if (size + offset > memdesc->size)
1882 return -EINVAL;
1883
1884 if (kgsl_dummy_page == NULL) {
1885 kgsl_dummy_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
1886 __GFP_HIGHMEM);
1887 if (kgsl_dummy_page == NULL)
1888 return -ENOMEM;
1889 }
1890
1891 pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
1892 if (pages == NULL)
1893 return -ENOMEM;
1894
1895 for (i = 0; i < count; i++)
1896 pages[i] = kgsl_dummy_page;
1897
1898 ret = sg_alloc_table_from_pages(&sgt, pages, count,
1899 0, size, GFP_KERNEL);
1900 if (ret == 0) {
1901 ret = _iommu_map_sg_sync_pc(pt, memdesc->gpuaddr + offset,
Carter Coopera1c7cce2017-12-15 13:29:29 -07001902 sgt.sgl, sgt.nents, IOMMU_READ | IOMMU_NOEXEC);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001903 sg_free_table(&sgt);
1904 }
1905
1906 kfree(pages);
1907
1908 return ret;
1909}
1910
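/*
 * _map_to_one_page() - Map a virtual range repeatedly to one physical page
 * @pt: Pointer to the kgsl pagetable structure
 * @addr: GPU virtual address at which to start mapping
 * @memdesc: Memory descriptor supplying the backing page
 * @physoffset: Offset into the memdesc of the backing page
 * @size: Size of the virtual range to map
 * @map_flags: IOMMU protection flags for the mapping
 *
 * Used for KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS bindings.
 */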
1911static int _map_to_one_page(struct kgsl_pagetable *pt, uint64_t addr,
1912 struct kgsl_memdesc *memdesc, uint64_t physoffset,
1913 uint64_t size, unsigned int map_flags)
1914{
1915 int ret = 0, i;
1916 int pg_sz = kgsl_memdesc_get_pagesize(memdesc);
1917 int count = size >> PAGE_SHIFT;
1918 struct page *page = NULL;
1919 struct page **pages = NULL;
1920 struct sg_page_iter sg_iter;
1921 struct sg_table sgt;
1922
1923	/* Find the page that corresponds to physoffset */
1924 if (memdesc->pages != NULL)
1925 page = memdesc->pages[physoffset >> PAGE_SHIFT];
1926 else {
1927 for_each_sg_page(memdesc->sgt->sgl, &sg_iter,
1928 memdesc->sgt->nents, physoffset >> PAGE_SHIFT) {
1929 page = sg_page_iter_page(&sg_iter);
1930 break;
1931 }
1932 }
1933
1934 if (page == NULL)
1935 return -EINVAL;
1936
1937 pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
1938 if (pages == NULL)
1939 return -ENOMEM;
1940
1941 for (i = 0; i < count; i++) {
1942 if (pg_sz != PAGE_SIZE) {
1943 struct page *tmp_page = page;
1944 int j;
1945
1946			for (j = 0; j < 16; j++, tmp_page++)
1947				pages[i + j] = tmp_page;

			/* Skip past the sub-pages filled in above */
			i += 15;
1948 } else
1949 pages[i] = page;
1950 }
1951
1952 ret = sg_alloc_table_from_pages(&sgt, pages, count,
1953 0, size, GFP_KERNEL);
1954 if (ret == 0) {
Carter Coopera1c7cce2017-12-15 13:29:29 -07001955 ret = _iommu_map_sg_sync_pc(pt, addr, sgt.sgl,
Shrenuj Bansala419c792016-10-20 14:05:11 -07001956 sgt.nents, map_flags);
1957 sg_free_table(&sgt);
1958 }
1959
1960 kfree(pages);
1961
1962 return ret;
1963}
1964
1965static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
1966 uint64_t virtaddr, uint64_t virtoffset,
1967 struct kgsl_memdesc *memdesc, uint64_t physoffset,
1968 uint64_t size, uint64_t feature_flag)
1969{
1970 int pg_sz;
1971 unsigned int protflags = _get_protection_flags(memdesc);
1972 int ret;
1973 struct sg_table *sgt = NULL;
1974
1975 pg_sz = kgsl_memdesc_get_pagesize(memdesc);
1976 if (!IS_ALIGNED(virtaddr | virtoffset | physoffset | size, pg_sz))
1977 return -EINVAL;
1978
1979 if (size == 0)
1980 return -EINVAL;
1981
1982 if (!(feature_flag & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS) &&
1983 size + physoffset > kgsl_memdesc_footprint(memdesc))
1984 return -EINVAL;
1985
1986 /*
1987 * For paged memory allocated through kgsl, memdesc->pages is not NULL.
1988 * Allocate sgt here just for its map operation. Contiguous memory
1989 * already has its sgt, so no need to allocate it here.
1990 */
1991 if (memdesc->pages != NULL)
1992 sgt = kgsl_alloc_sgt_from_pages(memdesc);
1993 else
1994 sgt = memdesc->sgt;
1995
1996 if (IS_ERR(sgt))
1997 return PTR_ERR(sgt);
1998
1999 if (feature_flag & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS)
2000 ret = _map_to_one_page(pt, virtaddr + virtoffset,
2001 memdesc, physoffset, size, protflags);
2002 else
2003 ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
Carter Coopera1c7cce2017-12-15 13:29:29 -07002004 sgt->sgl, sgt->nents,
Shrenuj Bansala419c792016-10-20 14:05:11 -07002005 physoffset, size, protflags);
2006
2007 if (memdesc->pages != NULL)
2008 kgsl_free_sgt(sgt);
2009
2010 return ret;
2011}
2012
2013/* This function must be called with context bank attached */
2014static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
2015{
2016 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2017 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2018 unsigned int sctlr_val;
2019
2020 if (ctx->default_pt != NULL) {
2021 kgsl_iommu_enable_clk(mmu);
2022 KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff);
2023 /*
2024 * Re-enable context fault interrupts after clearing
2025 * FSR to prevent the interrupt from firing repeatedly
2026 */
2027 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
2028 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
2029 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
2030 /*
2031 * Make sure the above register writes
2032 * are not reordered across the barrier
2033 * as we use writel_relaxed to write them
2034 */
2035 wmb();
2036 kgsl_iommu_disable_clk(mmu);
2037 }
2038}
2039
2040static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
2041{
2042 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2043 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2044
2045 if (ctx->default_pt != NULL && ctx->fault) {
2046 /*
2047 * Write 1 to RESUME.TnR to terminate the
2048 * stalled transaction.
2049 */
2050 KGSL_IOMMU_SET_CTX_REG(ctx, RESUME, 1);
2051 /*
2052 * Make sure the above register writes
2053 * are not reordered across the barrier
2054 * as we use writel_relaxed to write them
2055 */
2056 wmb();
2057 ctx->fault = 0;
2058 }
2059}
2060
2061static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
2062{
2063 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2064 int i;
2065
2066 /*
2067 * If the iommu supports retention, we don't need
2068 * to detach when stopping.
2069 */
2070 if (!MMU_FEATURE(mmu, KGSL_MMU_RETENTION)) {
2071 for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
2072 _detach_context(&iommu->ctx[i]);
2073 }
2074}
2075
2076static u64
2077kgsl_iommu_get_current_ttbr0(struct kgsl_mmu *mmu)
2078{
2079 u64 val;
2080 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
Harshdeep Dhatt1e55e212018-10-12 20:32:17 -06002081 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2082
Shrenuj Bansala419c792016-10-20 14:05:11 -07002083 /*
2084	 * We cannot enable or disable the clocks in interrupt context; this
2085	 * function is called from interrupt context if there is an AXI error.
2086 */
2087 if (in_interrupt())
2088 return 0;
2089
Harshdeep Dhatt1e55e212018-10-12 20:32:17 -06002090 if (ctx->regbase == NULL)
2091 return 0;
2092
Shrenuj Bansala419c792016-10-20 14:05:11 -07002093 kgsl_iommu_enable_clk(mmu);
Harshdeep Dhatt1e55e212018-10-12 20:32:17 -06002094 val = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002095 kgsl_iommu_disable_clk(mmu);
2096 return val;
2097}
2098
2099/*
2100 * kgsl_iommu_set_pt - Change the IOMMU pagetable of the primary context bank
2101 * @mmu - Pointer to mmu structure
2102 * @pt - Pagetable to switch to
2103 *
2104 * Set the new pagetable for the IOMMU by doing direct register writes
2105 * to the IOMMU registers through the CPU.
2106 *
2107 * Return - 0 on success
2108 */
2109static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
2110{
2111 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2112 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2113 uint64_t ttbr0, temp;
2114 unsigned int contextidr;
2115 unsigned long wait_for_flush;
2116
2117 if ((pt != mmu->defaultpagetable) && !kgsl_mmu_is_perprocess(mmu))
2118 return 0;
2119
2120 kgsl_iommu_enable_clk(mmu);
2121
2122 ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pt);
2123 contextidr = kgsl_mmu_pagetable_get_contextidr(pt);
2124
2125 KGSL_IOMMU_SET_CTX_REG_Q(ctx, TTBR0, ttbr0);
2126 KGSL_IOMMU_SET_CTX_REG(ctx, CONTEXTIDR, contextidr);
2127
2128 /* memory barrier before reading TTBR0 register */
2129 mb();
2130 temp = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
2131
2132 KGSL_IOMMU_SET_CTX_REG(ctx, TLBIALL, 1);
2133	/* make sure the TLBIALL write completes before we wait */
2134 mb();
2135 /*
2136 * Wait for flush to complete by polling the flush
2137 * status bit of TLBSTATUS register for not more than
2138	 * 2 seconds. After that just exit; at that point the SMMU h/w
2139	 * may be stuck and will eventually cause the GPU to hang
2140 * or bring the system down.
2141 */
2142 wait_for_flush = jiffies + msecs_to_jiffies(2000);
2143 KGSL_IOMMU_SET_CTX_REG(ctx, TLBSYNC, 0);
2144 while (KGSL_IOMMU_GET_CTX_REG(ctx, TLBSTATUS) &
2145 (KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE)) {
2146 if (time_after(jiffies, wait_for_flush)) {
2147 KGSL_DRV_WARN(KGSL_MMU_DEVICE(mmu),
2148 "Wait limit reached for IOMMU tlb flush\n");
2149 break;
2150 }
2151 cpu_relax();
2152 }
2153
2154 kgsl_iommu_disable_clk(mmu);
2155 return 0;
2156}
2157
2158/*
2159 * kgsl_iommu_set_pf_policy() - Set the pagefault policy for IOMMU
2160 * @mmu: Pointer to mmu structure
2161 * @pf_policy: The pagefault policy to set
2162 *
2163 * Check if the new policy indicated by pf_policy is the same as the
2164 * current policy; if it is, return, otherwise program the new policy.
2165 */
2166static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
2167 unsigned long pf_policy)
2168{
2169 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2170 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2171 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
2172 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2173
2174 if ((adreno_dev->ft_pf_policy &
2175 BIT(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)) ==
2176 (pf_policy & BIT(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)))
2177 return 0;
2178
2179 /* If not attached, policy will be updated during the next attach */
2180 if (ctx->default_pt != NULL) {
2181 unsigned int sctlr_val;
2182
2183 kgsl_iommu_enable_clk(mmu);
2184
2185 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
2186
2187 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, &pf_policy)) {
2188 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
2189 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
2190 } else {
2191 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
2192 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
2193 }
2194
2195 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
2196
2197 kgsl_iommu_disable_clk(mmu);
2198 }
2199
2200 return 0;
2201}
2202
2203static struct kgsl_protected_registers *
2204kgsl_iommu_get_prot_regs(struct kgsl_mmu *mmu)
2205{
2206 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2207
2208 return &iommu->protect;
2209}
2210
2211static struct kgsl_iommu_addr_entry *_find_gpuaddr(
2212 struct kgsl_pagetable *pagetable, uint64_t gpuaddr)
2213{
2214 struct kgsl_iommu_pt *pt = pagetable->priv;
2215 struct rb_node *node = pt->rbtree.rb_node;
2216
2217 while (node != NULL) {
2218 struct kgsl_iommu_addr_entry *entry = rb_entry(node,
2219 struct kgsl_iommu_addr_entry, node);
2220
2221 if (gpuaddr < entry->base)
2222 node = node->rb_left;
2223 else if (gpuaddr > entry->base)
2224 node = node->rb_right;
2225 else
2226 return entry;
2227 }
2228
2229 return NULL;
2230}
2231
2232static int _remove_gpuaddr(struct kgsl_pagetable *pagetable,
2233 uint64_t gpuaddr)
2234{
2235 struct kgsl_iommu_pt *pt = pagetable->priv;
2236 struct kgsl_iommu_addr_entry *entry;
2237
2238 entry = _find_gpuaddr(pagetable, gpuaddr);
2239
2240 if (entry != NULL) {
2241 rb_erase(&entry->node, &pt->rbtree);
2242 kmem_cache_free(addr_entry_cache, entry);
2243 return 0;
2244 }
2245
2246 WARN(1, "Couldn't remove gpuaddr: 0x%llx\n", gpuaddr);
2247 return -ENOMEM;
2248}
2249
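/*
 * _insert_gpuaddr() - Insert a new address entry into the pagetable rbtree
 * @pagetable: Pagetable that owns the rbtree
 * @gpuaddr: Base GPU address of the new entry
 * @size: Size of the new entry
 *
 * Callers are expected to hold the pagetable spinlock (hence the GFP_ATOMIC
 * allocation). Returns -EEXIST if an entry with the same base address is
 * already present.
 */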
2250static int _insert_gpuaddr(struct kgsl_pagetable *pagetable,
2251 uint64_t gpuaddr, uint64_t size)
2252{
2253 struct kgsl_iommu_pt *pt = pagetable->priv;
2254 struct rb_node **node, *parent = NULL;
2255 struct kgsl_iommu_addr_entry *new =
2256 kmem_cache_alloc(addr_entry_cache, GFP_ATOMIC);
2257
2258 if (new == NULL)
2259 return -ENOMEM;
2260
2261 new->base = gpuaddr;
2262 new->size = size;
2263
2264 node = &pt->rbtree.rb_node;
2265
2266 while (*node != NULL) {
2267 struct kgsl_iommu_addr_entry *this;
2268
2269 parent = *node;
2270 this = rb_entry(parent, struct kgsl_iommu_addr_entry, node);
2271
2272 if (new->base < this->base)
2273 node = &parent->rb_left;
2274 else if (new->base > this->base)
2275 node = &parent->rb_right;
2276 else {
2277 /* Duplicate entry */
2278 WARN(1, "duplicate gpuaddr: 0x%llx\n", gpuaddr);
2279 return -EEXIST;
2280 }
2281 }
2282
2283 rb_link_node(&new->node, parent, node);
2284 rb_insert_color(&new->node, &pt->rbtree);
2285
2286 return 0;
2287}
2288
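/*
 * _get_unmapped_area() - Find the lowest free gap in a pagetable range
 * @pagetable: Pagetable to search
 * @bottom: Lowest acceptable GPU address
 * @top: Upper bound of the search range
 * @size: Size of the region needed
 * @align: Required alignment of the region
 *
 * Walk the sorted rbtree of address entries from the bottom up and return
 * the first aligned gap large enough for the requested size, or
 * (uint64_t) -ENOMEM if no such gap exists.
 */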
2289static uint64_t _get_unmapped_area(struct kgsl_pagetable *pagetable,
2290 uint64_t bottom, uint64_t top, uint64_t size,
2291 uint64_t align)
2292{
2293 struct kgsl_iommu_pt *pt = pagetable->priv;
2294 struct rb_node *node = rb_first(&pt->rbtree);
2295 uint64_t start;
2296
2297 bottom = ALIGN(bottom, align);
2298 start = bottom;
2299
2300 while (node != NULL) {
2301 uint64_t gap;
2302 struct kgsl_iommu_addr_entry *entry = rb_entry(node,
2303 struct kgsl_iommu_addr_entry, node);
2304
2305 /*
2306 * Skip any entries that are outside of the range, but make sure
2307 * to account for some that might straddle the lower bound
2308 */
2309 if (entry->base < bottom) {
2310 if (entry->base + entry->size > bottom)
2311 start = ALIGN(entry->base + entry->size, align);
2312 node = rb_next(node);
2313 continue;
2314 }
2315
2316 /* Stop if we went over the top */
2317 if (entry->base >= top)
2318 break;
2319
2320 /* Make sure there is a gap to consider */
2321 if (start < entry->base) {
2322 gap = entry->base - start;
2323
2324 if (gap >= size)
2325 return start;
2326 }
2327
2328 /* Stop if there is no more room in the region */
2329 if (entry->base + entry->size >= top)
2330 return (uint64_t) -ENOMEM;
2331
2332 /* Start the next cycle at the end of the current entry */
2333 start = ALIGN(entry->base + entry->size, align);
2334 node = rb_next(node);
2335 }
2336
2337 if (start + size <= top)
2338 return start;
2339
2340 return (uint64_t) -ENOMEM;
2341}
2342
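/*
 * _get_unmapped_area_topdown() - Find the highest free gap in a pagetable
 * range
 * @pagetable: Pagetable to search
 * @bottom: Lowest acceptable GPU address
 * @top: Upper bound of the search range
 * @size: Size of the region needed
 * @align: Required alignment of the region
 *
 * Walk the rbtree from the highest entry downwards and return the highest
 * aligned address where the requested size fits, or (uint64_t) -ENOMEM on
 * failure. For example, with an empty tree the first candidate tried is
 * (top - size) & ~(align - 1).
 */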
2343static uint64_t _get_unmapped_area_topdown(struct kgsl_pagetable *pagetable,
2344 uint64_t bottom, uint64_t top, uint64_t size,
2345 uint64_t align)
2346{
2347 struct kgsl_iommu_pt *pt = pagetable->priv;
2348 struct rb_node *node = rb_last(&pt->rbtree);
2349 uint64_t end = top;
2350 uint64_t mask = ~(align - 1);
2351 struct kgsl_iommu_addr_entry *entry;
2352
2353 /* Make sure that the bottom is correctly aligned */
2354 bottom = ALIGN(bottom, align);
2355
2356 /* Make sure the requested size will fit in the range */
2357 if (size > (top - bottom))
2358 return -ENOMEM;
2359
2360 /* Walk back through the list to find the highest entry in the range */
2361 for (node = rb_last(&pt->rbtree); node != NULL; node = rb_prev(node)) {
2362 entry = rb_entry(node, struct kgsl_iommu_addr_entry, node);
2363 if (entry->base < top)
2364 break;
2365 }
2366
2367 while (node != NULL) {
2368 uint64_t offset;
2369
2370 entry = rb_entry(node, struct kgsl_iommu_addr_entry, node);
2371
2372 /* If the entire entry is below the range the search is over */
2373 if ((entry->base + entry->size) < bottom)
2374 break;
2375
2376 /* Get the top of the entry properly aligned */
2377 offset = ALIGN(entry->base + entry->size, align);
2378
2379 /*
2380 * Try to allocate the memory from the top of the gap,
2381 * making sure that it fits between the top of this entry and
2382 * the bottom of the previous one
2383 */
2384
2385 if ((end > size) && (offset < end)) {
2386 uint64_t chunk = (end - size) & mask;
2387
2388 if (chunk >= offset)
2389 return chunk;
2390 }
2391
2392 /*
2393 * If we get here and the current entry is outside of the range
2394 * then we are officially out of room
2395 */
2396
2397 if (entry->base < bottom)
2398 return (uint64_t) -ENOMEM;
2399
2400 /* Set the top of the gap to the current entry->base */
2401 end = entry->base;
2402
2403 /* And move on to the next lower entry */
2404 node = rb_prev(node);
2405 }
2406
2407 /* If we get here then there are no more entries in the region */
2408 if ((end > size) && (((end - size) & mask) >= bottom))
2409 return (end - size) & mask;
2410
2411 return (uint64_t) -ENOMEM;
2412}
2413
2414static uint64_t kgsl_iommu_find_svm_region(struct kgsl_pagetable *pagetable,
2415 uint64_t start, uint64_t end, uint64_t size,
2416 uint64_t alignment)
2417{
2418 uint64_t addr;
2419
2420 /* Avoid black holes */
2421 if (WARN(end <= start, "Bad search range: 0x%llx-0x%llx", start, end))
2422 return (uint64_t) -EINVAL;
2423
2424 spin_lock(&pagetable->lock);
2425 addr = _get_unmapped_area_topdown(pagetable,
2426 start, end, size, alignment);
2427 spin_unlock(&pagetable->lock);
2428 return addr;
2429}
2430
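/*
 * iommu_addr_in_svm_ranges() - Check that [gpuaddr, gpuaddr + size) sits
 * entirely inside one of the SVM ranges
 * @pt: Pointer to the iommu pagetable private data
 * @gpuaddr: Start of the region to check
 * @size: Size of the region to check
 *
 * A zero size or a wrapping range is rejected; otherwise the region must
 * fall completely inside either the compat VA range or the SVM range.
 */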
gkiranku0eecd242020-07-29 18:28:57 +05302431static bool iommu_addr_in_svm_ranges(struct kgsl_iommu_pt *pt,
2432 u64 gpuaddr, u64 size)
2433{
Sanjay Yadav3c9f3402023-09-14 18:04:52 +05302434 u64 end = gpuaddr + size;
2435
2436 /* Make sure size is not zero and we don't wrap around */
2437 if (end <= gpuaddr)
2438 return false;
2439
gkiranku0eecd242020-07-29 18:28:57 +05302440 if ((gpuaddr >= pt->compat_va_start && gpuaddr < pt->compat_va_end) &&
Sanjay Yadav3c9f3402023-09-14 18:04:52 +05302441 (end > pt->compat_va_start && end <= pt->compat_va_end))
gkiranku0eecd242020-07-29 18:28:57 +05302442 return true;
2443
2444 if ((gpuaddr >= pt->svm_start && gpuaddr < pt->svm_end) &&
Sanjay Yadav3c9f3402023-09-14 18:04:52 +05302445 (end > pt->svm_start && end <= pt->svm_end))
gkiranku0eecd242020-07-29 18:28:57 +05302446 return true;
2447
2448 return false;
2449}
2450
Shrenuj Bansala419c792016-10-20 14:05:11 -07002451static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
2452 uint64_t gpuaddr, uint64_t size)
2453{
2454 int ret = -ENOMEM;
2455 struct kgsl_iommu_pt *pt = pagetable->priv;
2456 struct rb_node *node;
2457
gkiranku0eecd242020-07-29 18:28:57 +05302458	/* Make sure the requested range falls inside one of the SVM ranges */
2459 if (!iommu_addr_in_svm_ranges(pt, gpuaddr, size))
Shrenuj Bansala419c792016-10-20 14:05:11 -07002460 return -ENOMEM;
2461
2462 spin_lock(&pagetable->lock);
2463 node = pt->rbtree.rb_node;
2464
2465 while (node != NULL) {
2466 uint64_t start, end;
2467 struct kgsl_iommu_addr_entry *entry = rb_entry(node,
2468 struct kgsl_iommu_addr_entry, node);
2469
2470 start = entry->base;
2471 end = entry->base + entry->size;
2472
2473 if (gpuaddr + size <= start)
2474 node = node->rb_left;
2475 else if (end <= gpuaddr)
2476 node = node->rb_right;
2477 else
2478 goto out;
2479 }
2480
2481 ret = _insert_gpuaddr(pagetable, gpuaddr, size);
2482out:
2483 spin_unlock(&pagetable->lock);
2484 return ret;
2485}
2486
2487
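/*
 * kgsl_iommu_get_gpuaddr() - Assign a GPU virtual address to a memdesc
 * @pagetable: Pagetable to allocate the address from
 * @memdesc: Memory descriptor to assign an address to
 *
 * Pick the VA range based on the memdesc flags (compat vs. full range,
 * skipping the secure globals for secure buffers), find a free gap with
 * _get_unmapped_area() and record it in the pagetable rbtree.
 */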
2488static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
2489 struct kgsl_memdesc *memdesc)
2490{
2491 struct kgsl_iommu_pt *pt = pagetable->priv;
2492 int ret = 0;
2493 uint64_t addr, start, end, size;
2494 unsigned int align;
2495
2496 if (WARN_ON(kgsl_memdesc_use_cpu_map(memdesc)))
2497 return -EINVAL;
2498
2499 if (memdesc->flags & KGSL_MEMFLAGS_SECURE &&
2500 pagetable->name != KGSL_MMU_SECURE_PT)
2501 return -EINVAL;
2502
2503 size = kgsl_memdesc_footprint(memdesc);
2504
2505 align = 1 << kgsl_memdesc_get_align(memdesc);
2506
2507 if (memdesc->flags & KGSL_MEMFLAGS_FORCE_32BIT) {
2508 start = pt->compat_va_start;
2509 end = pt->compat_va_end;
2510 } else {
2511 start = pt->va_start;
2512 end = pt->va_end;
2513 }
2514
Harshdeep Dhatt1f408332017-03-27 11:35:13 -06002515 /*
2516 * When mapping secure buffers, adjust the start of the va range
2517 * to the end of secure global buffers.
2518 */
2519 if (kgsl_memdesc_is_secured(memdesc))
2520 start += secure_global_size;
2521
Shrenuj Bansala419c792016-10-20 14:05:11 -07002522 spin_lock(&pagetable->lock);
2523
2524 addr = _get_unmapped_area(pagetable, start, end, size, align);
2525
2526 if (addr == (uint64_t) -ENOMEM) {
2527 ret = -ENOMEM;
2528 goto out;
2529 }
2530
Jordan Crouse286b1152020-12-30 16:30:50 +05302531 /*
2532	 * This path is only called for non-SVM allocations and with the
2533	 * relevant locks already held, so we can be sure we aren't racing
2534	 * with anybody and don't need to take any additional lock here.
2535 */
Shrenuj Bansala419c792016-10-20 14:05:11 -07002536 ret = _insert_gpuaddr(pagetable, addr, size);
2537 if (ret == 0) {
2538 memdesc->gpuaddr = addr;
2539 memdesc->pagetable = pagetable;
2540 }
2541
2542out:
2543 spin_unlock(&pagetable->lock);
2544 return ret;
2545}
2546
2547static void kgsl_iommu_put_gpuaddr(struct kgsl_memdesc *memdesc)
2548{
2549 if (memdesc->pagetable == NULL)
2550 return;
2551
2552 spin_lock(&memdesc->pagetable->lock);
2553
2554 _remove_gpuaddr(memdesc->pagetable, memdesc->gpuaddr);
2555
2556 spin_unlock(&memdesc->pagetable->lock);
2557}
2558
2559static int kgsl_iommu_svm_range(struct kgsl_pagetable *pagetable,
2560 uint64_t *lo, uint64_t *hi, uint64_t memflags)
2561{
2562 struct kgsl_iommu_pt *pt = pagetable->priv;
2563 bool gpu_compat = (memflags & KGSL_MEMFLAGS_FORCE_32BIT) != 0;
2564
2565 if (lo != NULL)
2566 *lo = gpu_compat ? pt->compat_va_start : pt->svm_start;
2567 if (hi != NULL)
2568 *hi = gpu_compat ? pt->compat_va_end : pt->svm_end;
2569
2570 return 0;
2571}
2572
2573static bool kgsl_iommu_addr_in_range(struct kgsl_pagetable *pagetable,
Abhishek Barman4e80eb02021-12-07 12:50:34 +05302574 uint64_t gpuaddr, uint64_t size)
Shrenuj Bansala419c792016-10-20 14:05:11 -07002575{
2576 struct kgsl_iommu_pt *pt = pagetable->priv;
2577
2578 if (gpuaddr == 0)
2579 return false;
2580
Abhishek Barman4e80eb02021-12-07 12:50:34 +05302581 if (gpuaddr >= pt->va_start && (gpuaddr + size) < pt->va_end)
Shrenuj Bansala419c792016-10-20 14:05:11 -07002582 return true;
2583
Abhishek Barman4e80eb02021-12-07 12:50:34 +05302584 if (gpuaddr >= pt->compat_va_start &&
2585 (gpuaddr + size) < pt->compat_va_end)
Shrenuj Bansala419c792016-10-20 14:05:11 -07002586 return true;
2587
Abhishek Barman4e80eb02021-12-07 12:50:34 +05302588 if (gpuaddr >= pt->svm_start && (gpuaddr + size) < pt->svm_end)
Shrenuj Bansala419c792016-10-20 14:05:11 -07002589 return true;
2590
2591 return false;
2592}
2593
2594static const struct {
2595 int id;
2596 char *name;
2597} kgsl_iommu_cbs[] = {
2598 { KGSL_IOMMU_CONTEXT_USER, "gfx3d_user", },
2599 { KGSL_IOMMU_CONTEXT_SECURE, "gfx3d_secure" },
Rajesh Kemisetti63d93582018-01-31 10:52:56 +05302600 { KGSL_IOMMU_CONTEXT_SECURE, "gfx3d_secure_alt" },
Shrenuj Bansala419c792016-10-20 14:05:11 -07002601};
2602
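/*
 * _kgsl_iommu_cb_probe() - Probe a single context bank device tree node
 * @device: Pointer to the kgsl device
 * @iommu: Pointer to the iommu private data
 * @node: Context bank device tree node
 *
 * Match the node against the known context bank names (honoring the
 * ADRENO_QUIRK_MMU_SECURE_CB_ALT quirk), record the per-context data and
 * resolve the struct device to attach to.
 */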
2603static int _kgsl_iommu_cb_probe(struct kgsl_device *device,
2604 struct kgsl_iommu *iommu, struct device_node *node)
2605{
2606 struct platform_device *pdev = of_find_device_by_node(node);
2607 struct kgsl_iommu_context *ctx = NULL;
Rajesh Kemisetti63d93582018-01-31 10:52:56 +05302608 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002609 int i;
2610
2611 for (i = 0; i < ARRAY_SIZE(kgsl_iommu_cbs); i++) {
2612 if (!strcmp(node->name, kgsl_iommu_cbs[i].name)) {
2613 int id = kgsl_iommu_cbs[i].id;
2614
Rajesh Kemisetti63d93582018-01-31 10:52:56 +05302615 if (ADRENO_QUIRK(adreno_dev,
2616 ADRENO_QUIRK_MMU_SECURE_CB_ALT)) {
2617 if (!strcmp(node->name, "gfx3d_secure"))
2618 continue;
2619 } else if (!strcmp(node->name, "gfx3d_secure_alt"))
2620 continue;
2621
Shrenuj Bansala419c792016-10-20 14:05:11 -07002622 ctx = &iommu->ctx[id];
2623 ctx->id = id;
2624 ctx->cb_num = -1;
2625 ctx->name = kgsl_iommu_cbs[i].name;
2626
2627 break;
2628 }
2629 }
2630
2631 if (ctx == NULL) {
Rajesh Kemisetti63d93582018-01-31 10:52:56 +05302632 KGSL_CORE_ERR("dt: Unused context label %s\n", node->name);
2633 return 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002634 }
2635
2636 if (ctx->id == KGSL_IOMMU_CONTEXT_SECURE)
2637 device->mmu.secured = true;
2638
2639 /* this property won't be found for all context banks */
2640 if (of_property_read_u32(node, "qcom,gpu-offset", &ctx->gpu_offset))
2641 ctx->gpu_offset = UINT_MAX;
2642
2643 ctx->kgsldev = device;
2644
2645	/* With the arm-smmu driver we'll have the right device pointer here. */
2646 if (of_find_property(node, "iommus", NULL)) {
2647 ctx->dev = &pdev->dev;
2648 } else {
2649 ctx->dev = kgsl_mmu_get_ctx(ctx->name);
2650
2651 if (IS_ERR(ctx->dev))
2652 return PTR_ERR(ctx->dev);
2653 }
2654
2655 return 0;
2656}
2657
2658static const struct {
2659 char *feature;
Lynus Vazeb7af682017-04-17 18:36:01 +05302660 unsigned long bit;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002661} kgsl_iommu_features[] = {
2662 { "qcom,retention", KGSL_MMU_RETENTION },
2663 { "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
2664 { "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC },
2665 { "qcom,force-32bit", KGSL_MMU_FORCE_32BIT },
2666};
2667
2668static int _kgsl_iommu_probe(struct kgsl_device *device,
2669 struct device_node *node)
2670{
2671 const char *cname;
2672 struct property *prop;
2673 u32 reg_val[2];
2674 int i = 0;
2675 struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
2676 struct device_node *child;
2677 struct platform_device *pdev = of_find_device_by_node(node);
2678
2679 memset(iommu, 0, sizeof(*iommu));
2680
2681 if (of_device_is_compatible(node, "qcom,kgsl-smmu-v1"))
2682 iommu->version = 1;
2683 else
2684 iommu->version = 2;
2685
2686 if (of_property_read_u32_array(node, "reg", reg_val, 2)) {
2687 KGSL_CORE_ERR("dt: Unable to read KGSL IOMMU register range\n");
2688 return -EINVAL;
2689 }
2690 iommu->regstart = reg_val[0];
2691 iommu->regsize = reg_val[1];
2692
2693 /* Protecting the SMMU registers is mandatory */
2694 if (of_property_read_u32_array(node, "qcom,protect", reg_val, 2)) {
2695 KGSL_CORE_ERR("dt: no iommu protection range specified\n");
2696 return -EINVAL;
2697 }
2698 iommu->protect.base = reg_val[0] / sizeof(u32);
Lynus Vaz607a42d2018-05-23 20:26:51 +05302699 iommu->protect.range = reg_val[1] / sizeof(u32);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002700
2701 of_property_for_each_string(node, "clock-names", prop, cname) {
2702 struct clk *c = devm_clk_get(&pdev->dev, cname);
2703
2704 if (IS_ERR(c)) {
2705 KGSL_CORE_ERR("dt: Couldn't get clock: %s\n", cname);
2706 return -ENODEV;
2707 }
2708 if (i >= KGSL_IOMMU_MAX_CLKS) {
2709 KGSL_CORE_ERR("dt: too many clocks defined.\n");
2710 return -EINVAL;
2711 }
2712
2713 iommu->clks[i] = c;
2714 ++i;
2715 }
2716
2717 for (i = 0; i < ARRAY_SIZE(kgsl_iommu_features); i++) {
2718 if (of_property_read_bool(node, kgsl_iommu_features[i].feature))
2719 device->mmu.features |= kgsl_iommu_features[i].bit;
2720 }
2721
2722 if (of_property_read_u32(node, "qcom,micro-mmu-control",
2723 &iommu->micro_mmu_ctrl))
2724 iommu->micro_mmu_ctrl = UINT_MAX;
2725
2726 if (of_property_read_u32(node, "qcom,secure_align_mask",
2727 &device->mmu.secure_align_mask))
2728 device->mmu.secure_align_mask = 0xfff;
2729
2730 /* Fill out the rest of the devices in the node */
2731 of_platform_populate(node, NULL, NULL, &pdev->dev);
2732
2733 for_each_child_of_node(node, child) {
2734 int ret;
2735
2736 if (!of_device_is_compatible(child, "qcom,smmu-kgsl-cb"))
2737 continue;
2738
2739 ret = _kgsl_iommu_cb_probe(device, iommu, child);
2740 if (ret)
2741 return ret;
2742 }
2743
2744 return 0;
2745}
2746
2747static const struct {
2748 char *compat;
2749 int (*probe)(struct kgsl_device *device, struct device_node *node);
2750} kgsl_dt_devices[] = {
2751 { "qcom,kgsl-smmu-v1", _kgsl_iommu_probe },
2752 { "qcom,kgsl-smmu-v2", _kgsl_iommu_probe },
2753};
2754
2755static int kgsl_iommu_probe(struct kgsl_device *device)
2756{
2757 int i;
2758
2759 for (i = 0; i < ARRAY_SIZE(kgsl_dt_devices); i++) {
2760 struct device_node *node;
2761
2762 node = of_find_compatible_node(device->pdev->dev.of_node,
2763 NULL, kgsl_dt_devices[i].compat);
2764
2765 if (node != NULL)
2766 return kgsl_dt_devices[i].probe(device, node);
2767 }
2768
2769 return -ENODEV;
2770}
2771
2772struct kgsl_mmu_ops kgsl_iommu_ops = {
2773 .mmu_init = kgsl_iommu_init,
2774 .mmu_close = kgsl_iommu_close,
2775 .mmu_start = kgsl_iommu_start,
2776 .mmu_stop = kgsl_iommu_stop,
2777 .mmu_set_pt = kgsl_iommu_set_pt,
2778 .mmu_clear_fsr = kgsl_iommu_clear_fsr,
2779 .mmu_get_current_ttbr0 = kgsl_iommu_get_current_ttbr0,
2780 .mmu_enable_clk = kgsl_iommu_enable_clk,
2781 .mmu_disable_clk = kgsl_iommu_disable_clk,
2782 .mmu_get_reg_ahbaddr = kgsl_iommu_get_reg_ahbaddr,
2783 .mmu_pt_equal = kgsl_iommu_pt_equal,
2784 .mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
2785 .mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
2786 .mmu_get_prot_regs = kgsl_iommu_get_prot_regs,
2787 .mmu_init_pt = kgsl_iommu_init_pt,
2788 .mmu_add_global = kgsl_iommu_add_global,
2789 .mmu_remove_global = kgsl_iommu_remove_global,
2790 .mmu_getpagetable = kgsl_iommu_getpagetable,
2791 .mmu_get_qdss_global_entry = kgsl_iommu_get_qdss_global_entry,
Jonathan Wicks4892d8d2017-02-24 16:21:26 -07002792 .mmu_get_qtimer_global_entry = kgsl_iommu_get_qtimer_global_entry,
Shrenuj Bansala419c792016-10-20 14:05:11 -07002793 .probe = kgsl_iommu_probe,
2794};
2795
2796static struct kgsl_mmu_pt_ops iommu_pt_ops = {
2797 .mmu_map = kgsl_iommu_map,
2798 .mmu_unmap = kgsl_iommu_unmap,
2799 .mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
2800 .get_ttbr0 = kgsl_iommu_get_ttbr0,
2801 .get_contextidr = kgsl_iommu_get_contextidr,
2802 .get_gpuaddr = kgsl_iommu_get_gpuaddr,
2803 .put_gpuaddr = kgsl_iommu_put_gpuaddr,
2804 .set_svm_region = kgsl_iommu_set_svm_region,
2805 .find_svm_region = kgsl_iommu_find_svm_region,
2806 .svm_range = kgsl_iommu_svm_range,
2807 .addr_in_range = kgsl_iommu_addr_in_range,
2808 .mmu_map_offset = kgsl_iommu_map_offset,
2809 .mmu_unmap_offset = kgsl_iommu_unmap_offset,
2810 .mmu_sparse_dummy_map = kgsl_iommu_sparse_dummy_map,
2811};