/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/msm_kgsl.h>
#include <linux/ratelimit.h>
#include <linux/of_platform.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/compat.h>

#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_mmu.h"
#include "kgsl_sharedmem.h"
#include "kgsl_iommu.h"
#include "adreno_pm4types.h"
#include "adreno.h"
#include "kgsl_trace.h"
#include "kgsl_pwrctrl.h"

#define CP_APERTURE_REG 0
#define CP_SMMU_APERTURE_ID 0x1B

#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))

#define ADDR_IN_GLOBAL(_mmu, _a) \
	(((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)) && \
	 ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) + \
	 KGSL_IOMMU_GLOBAL_MEM_SIZE)))

/*
 * Flag to set SMMU memory attributes required to
 * enable system cache for GPU transactions.
 */
#ifndef IOMMU_USE_UPSTREAM_HINT
#define IOMMU_USE_UPSTREAM_HINT 0
#endif

static struct kgsl_mmu_pt_ops iommu_pt_ops;
static bool need_iommu_sync;

const unsigned int kgsl_iommu_reg_list[KGSL_IOMMU_REG_MAX] = {
	0x0,/* SCTLR */
	0x20,/* TTBR0 */
	0x34,/* CONTEXTIDR */
	0x58,/* FSR */
	0x60,/* FAR_0 */
	0x618,/* TLBIALL */
	0x008,/* RESUME */
	0x68,/* FSYNR0 */
	0x6C,/* FSYNR1 */
	0x7F0,/* TLBSYNC */
	0x7F4,/* TLBSTATUS */
};

/*
 * struct kgsl_iommu_addr_entry - entry in the kgsl_iommu_pt rbtree.
 * @base: starting virtual address of the entry
 * @size: size of the entry
 * @node: the rbtree node
 *
 */
struct kgsl_iommu_addr_entry {
	uint64_t base;
	uint64_t size;
	struct rb_node node;
};

static struct kmem_cache *addr_entry_cache;

/*
 * There are certain memory allocations (ringbuffer, memstore, etc) that need to
 * be present at the same address in every pagetable. We call these "global"
 * pagetable entries. There are relatively few of these and they are mostly
 * stable (defined at init time) but the actual number of globals can differ
 * slightly depending on the target and implementation.
 *
 * Here we define an array and a simple allocator to keep track of the currently
 * active global entries. Each entry is assigned a unique address inside of a
 * MMU implementation specific "global" region. The addresses are assigned
 * sequentially and never re-used to avoid having to go back and reprogram
 * existing pagetables. The entire list of active entries is mapped and
 * unmapped into every new pagetable as it is created and destroyed.
 *
 * Because there are relatively few entries and they are defined at boot time we
 * don't need to go over the top to define a dynamic allocation scheme. It will
 * be less wasteful to pick a static number with a little bit of growth
 * potential.
 */

#define GLOBAL_PT_ENTRIES 32

struct global_pt_entry {
	struct kgsl_memdesc *memdesc;
	char name[32];
};

static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
static int secure_global_size;
static int global_pt_count;
uint64_t global_pt_alloc;
static struct kgsl_memdesc gpu_qdss_desc;
static struct kgsl_memdesc gpu_qtimer_desc;

void kgsl_print_global_pt_entries(struct seq_file *s)
{
	int i;

	for (i = 0; i < global_pt_count; i++) {
		struct kgsl_memdesc *memdesc = global_pt_entries[i].memdesc;

		if (memdesc == NULL)
			continue;

		seq_printf(s, "0x%pK-0x%pK %16llu %s\n",
			(uint64_t *)(uintptr_t) memdesc->gpuaddr,
			(uint64_t *)(uintptr_t) (memdesc->gpuaddr +
			memdesc->size - 1), memdesc->size,
			global_pt_entries[i].name);
	}
}

static void kgsl_iommu_unmap_globals(struct kgsl_pagetable *pagetable)
{
	unsigned int i;

	for (i = 0; i < global_pt_count; i++) {
		if (global_pt_entries[i].memdesc != NULL)
			kgsl_mmu_unmap(pagetable,
				global_pt_entries[i].memdesc);
	}
}

static int kgsl_iommu_map_globals(struct kgsl_pagetable *pagetable)
{
	unsigned int i;

	for (i = 0; i < global_pt_count; i++) {
		if (global_pt_entries[i].memdesc != NULL) {
			int ret = kgsl_mmu_map(pagetable,
					global_pt_entries[i].memdesc);

			if (ret)
				return ret;
		}
	}

	return 0;
}

void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc)
{
	if (!kgsl_mmu_is_secured(&device->mmu) || memdesc == NULL)
		return;

	/* Check if an empty memdesc got passed in */
	if ((memdesc->gpuaddr == 0) || (memdesc->size == 0))
		return;

	if (memdesc->pagetable) {
		if (memdesc->pagetable->name == KGSL_MMU_SECURE_PT)
			kgsl_mmu_unmap(memdesc->pagetable, memdesc);
	}
}

int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
		struct kgsl_memdesc *entry)
{
	int ret = 0;

	if (!kgsl_mmu_is_secured(&device->mmu))
		return -ENOTSUPP;

	if (entry != NULL) {
		struct kgsl_pagetable *pagetable = device->mmu.securepagetable;

		entry->pagetable = pagetable;
		entry->gpuaddr = KGSL_IOMMU_SECURE_BASE(&device->mmu) +
			secure_global_size;

		ret = kgsl_mmu_map(pagetable, entry);
		if (ret == 0)
			secure_global_size += entry->size;
	}
	return ret;
}

static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
		struct kgsl_memdesc *memdesc)
{
	int i;

	if (memdesc->gpuaddr == 0 || !(memdesc->priv & KGSL_MEMDESC_GLOBAL))
		return;

	for (i = 0; i < global_pt_count; i++) {
		if (global_pt_entries[i].memdesc == memdesc) {
			memdesc->gpuaddr = 0;
			memdesc->priv &= ~KGSL_MEMDESC_GLOBAL;
			global_pt_entries[i].memdesc = NULL;
			return;
		}
	}
}

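/*
 * kgsl_iommu_add_global - assign a global GPU address to @memdesc and record
 * it in global_pt_entries[]. Addresses are handed out by a simple bump
 * allocator (global_pt_alloc) inside the MMU's global region and are never
 * recycled, so every pagetable sees the same global layout.
 */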
static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
		struct kgsl_memdesc *memdesc, const char *name)
{
	if (memdesc->gpuaddr != 0)
		return;

	/* Check that we can fit the global allocations */
	if (WARN_ON(global_pt_count >= GLOBAL_PT_ENTRIES) ||
		WARN_ON((global_pt_alloc + memdesc->size) >=
			KGSL_IOMMU_GLOBAL_MEM_SIZE))
		return;

	memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + global_pt_alloc;

	memdesc->priv |= KGSL_MEMDESC_GLOBAL;
	global_pt_alloc += memdesc->size;

	global_pt_entries[global_pt_count].memdesc = memdesc;
	strlcpy(global_pt_entries[global_pt_count].name, name,
		sizeof(global_pt_entries[global_pt_count].name));
	global_pt_count++;
}

struct kgsl_memdesc *kgsl_iommu_get_qdss_global_entry(void)
{
	return &gpu_qdss_desc;
}

static void kgsl_setup_qdss_desc(struct kgsl_device *device)
{
	int result = 0;
	uint32_t gpu_qdss_entry[2];

	if (!of_find_property(device->pdev->dev.of_node,
		"qcom,gpu-qdss-stm", NULL))
		return;

	if (of_property_read_u32_array(device->pdev->dev.of_node,
			"qcom,gpu-qdss-stm", gpu_qdss_entry, 2)) {
		KGSL_CORE_ERR("Failed to read gpu qdss dts entry\n");
		return;
	}

	kgsl_memdesc_init(device, &gpu_qdss_desc, 0);
	gpu_qdss_desc.priv = 0;
	gpu_qdss_desc.physaddr = gpu_qdss_entry[0];
	gpu_qdss_desc.size = gpu_qdss_entry[1];
	gpu_qdss_desc.pagetable = NULL;
	gpu_qdss_desc.ops = NULL;
	gpu_qdss_desc.hostptr = NULL;

	result = memdesc_sg_dma(&gpu_qdss_desc, gpu_qdss_desc.physaddr,
			gpu_qdss_desc.size);
	if (result) {
		KGSL_CORE_ERR("memdesc_sg_dma failed: %d\n", result);
		return;
	}

	kgsl_mmu_add_global(device, &gpu_qdss_desc, "gpu-qdss");
}

static inline void kgsl_cleanup_qdss_desc(struct kgsl_mmu *mmu)
{
	kgsl_iommu_remove_global(mmu, &gpu_qdss_desc);
	kgsl_sharedmem_free(&gpu_qdss_desc);
}

struct kgsl_memdesc *kgsl_iommu_get_qtimer_global_entry(void)
{
	return &gpu_qtimer_desc;
}

static void kgsl_setup_qtimer_desc(struct kgsl_device *device)
{
	int result = 0;
	uint32_t gpu_qtimer_entry[2];

	if (!of_find_property(device->pdev->dev.of_node,
		"qcom,gpu-qtimer", NULL))
		return;

	if (of_property_read_u32_array(device->pdev->dev.of_node,
			"qcom,gpu-qtimer", gpu_qtimer_entry, 2)) {
		KGSL_CORE_ERR("Failed to read gpu qtimer dts entry\n");
		return;
	}

	kgsl_memdesc_init(device, &gpu_qtimer_desc, 0);
	gpu_qtimer_desc.priv = 0;
	gpu_qtimer_desc.physaddr = gpu_qtimer_entry[0];
	gpu_qtimer_desc.size = gpu_qtimer_entry[1];
	gpu_qtimer_desc.pagetable = NULL;
	gpu_qtimer_desc.ops = NULL;
	gpu_qtimer_desc.hostptr = NULL;

	result = memdesc_sg_dma(&gpu_qtimer_desc, gpu_qtimer_desc.physaddr,
			gpu_qtimer_desc.size);
	if (result) {
		KGSL_CORE_ERR("memdesc_sg_dma failed: %d\n", result);
		return;
	}

	kgsl_mmu_add_global(device, &gpu_qtimer_desc, "gpu-qtimer");
}

static inline void kgsl_cleanup_qtimer_desc(struct kgsl_mmu *mmu)
{
	kgsl_iommu_remove_global(mmu, &gpu_qtimer_desc);
	kgsl_sharedmem_free(&gpu_qtimer_desc);
}

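/*
 * _iommu_sync_mmu_pc - serialize pagetable changes with the kgsl_mmu_sync
 * mutex when the "qcom,gpu-quirk-iommu-sync" device-tree quirk is present
 * (see kgsl_iommu_init()). All map/unmap and attach/detach paths bracket
 * their IOMMU calls with this helper.
 */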
static inline void _iommu_sync_mmu_pc(bool lock)
{
	if (need_iommu_sync == false)
		return;

	if (lock)
		mutex_lock(&kgsl_mmu_sync);
	else
		mutex_unlock(&kgsl_mmu_sync);
}

static void _detach_pt(struct kgsl_iommu_pt *iommu_pt,
		struct kgsl_iommu_context *ctx)
{
	if (iommu_pt->attached) {
		_iommu_sync_mmu_pc(true);
		iommu_detach_device(iommu_pt->domain, ctx->dev);
		_iommu_sync_mmu_pc(false);
		iommu_pt->attached = false;
	}
}

static int _attach_pt(struct kgsl_iommu_pt *iommu_pt,
		struct kgsl_iommu_context *ctx)
{
	int ret;

	if (iommu_pt->attached)
		return 0;

	_iommu_sync_mmu_pc(true);
	ret = iommu_attach_device(iommu_pt->domain, ctx->dev);
	_iommu_sync_mmu_pc(false);

	if (ret == 0)
		iommu_pt->attached = true;

	return ret;
}

static int _iommu_map_sync_pc(struct kgsl_pagetable *pt,
		uint64_t gpuaddr, phys_addr_t physaddr,
		uint64_t size, unsigned int flags)
{
	struct kgsl_iommu_pt *iommu_pt = pt->priv;
	int ret;

	_iommu_sync_mmu_pc(true);

	ret = iommu_map(iommu_pt->domain, gpuaddr, physaddr, size, flags);

	_iommu_sync_mmu_pc(false);

	if (ret) {
		KGSL_CORE_ERR("map err: 0x%016llX, 0x%llx, 0x%x, %d\n",
			gpuaddr, size, flags, ret);
		return -ENODEV;
	}

	return 0;
}

static int _iommu_unmap_sync_pc(struct kgsl_pagetable *pt,
		uint64_t addr, uint64_t size)
{
	struct kgsl_iommu_pt *iommu_pt = pt->priv;
	size_t unmapped = 0;

	_iommu_sync_mmu_pc(true);

	unmapped = iommu_unmap(iommu_pt->domain, addr, size);

	_iommu_sync_mmu_pc(false);

	if (unmapped != size) {
		KGSL_CORE_ERR("unmap err: 0x%016llx, 0x%llx, %zd\n",
			addr, size, unmapped);
		return -ENODEV;
	}

	return 0;
}

static int _iommu_map_sg_offset_sync_pc(struct kgsl_pagetable *pt,
		uint64_t addr, struct scatterlist *sg, int nents,
		uint64_t offset, uint64_t size, unsigned int flags)
{
	struct kgsl_iommu_pt *iommu_pt = pt->priv;
	uint64_t offset_tmp = offset;
	uint64_t size_tmp = size;
	size_t mapped = 0;
	unsigned int i;
	struct scatterlist *s;
	phys_addr_t physaddr;
	int ret;

	_iommu_sync_mmu_pc(true);

	for_each_sg(sg, s, nents, i) {
		/* Iterate until we find the offset */
		if (offset_tmp >= s->length) {
			offset_tmp -= s->length;
			continue;
		}

		/* How much mapping is needed in this sg? */
		if (size < s->length - offset_tmp)
			size_tmp = size;
		else
			size_tmp = s->length - offset_tmp;

		/* Get the phys addr for the offset page */
		if (offset_tmp != 0) {
			physaddr = page_to_phys(nth_page(sg_page(s),
					offset_tmp >> PAGE_SHIFT));
			/* Reset offset_tmp */
			offset_tmp = 0;
		} else
			physaddr = page_to_phys(sg_page(s));

		/* Do the map for this sg */
		ret = iommu_map(iommu_pt->domain, addr + mapped,
				physaddr, size_tmp, flags);
		if (ret)
			break;

		mapped += size_tmp;
		size -= size_tmp;

		if (size == 0)
			break;
	}

	_iommu_sync_mmu_pc(false);

	if (size != 0) {
		/* Cleanup on error */
		_iommu_unmap_sync_pc(pt, addr, mapped);
		KGSL_CORE_ERR(
			"map sg offset err: 0x%016llX, %d, %x, %zd\n",
			addr, nents, flags, mapped);
		return -ENODEV;
	}

	return 0;
}

static int _iommu_map_sg_sync_pc(struct kgsl_pagetable *pt,
		uint64_t addr, struct scatterlist *sg, int nents,
		unsigned int flags)
{
	struct kgsl_iommu_pt *iommu_pt = pt->priv;
	size_t mapped;

	_iommu_sync_mmu_pc(true);

	mapped = iommu_map_sg(iommu_pt->domain, addr, sg, nents, flags);

	_iommu_sync_mmu_pc(false);

	if (mapped == 0) {
		KGSL_CORE_ERR("map sg err: 0x%016llX, %d, %x, %zd\n",
			addr, nents, flags, mapped);
		return -ENODEV;
	}

	return 0;
}

/*
 * One page allocation for a guard region to protect against over-zealous
 * GPU pre-fetch
 */

static struct page *kgsl_guard_page;
static struct kgsl_memdesc kgsl_secure_guard_page_memdesc;

/*
 * The dummy page is a placeholder/extra page to be used for sparse mappings.
 * This page will be mapped to all virtual sparse bindings that are not
 * physically backed.
 */
static struct page *kgsl_dummy_page;

/*
 * These functions help find the nearest allocated memory entries on either
 * side of a faulting address. If we know the nearby allocated memory we can
 * better determine what should have been located in the faulting region.
 */

/*
 * A local structure to make it easy to store the interesting bits for the
 * memory entries on either side of the faulting address
 */

struct _mem_entry {
	uint64_t gpuaddr;
	uint64_t size;
	uint64_t flags;
	unsigned int priv;
	int pending_free;
	pid_t pid;
	char name[32];
};

static void _get_global_entries(uint64_t faultaddr,
		struct _mem_entry *prev,
		struct _mem_entry *next)
{
	int i;
	uint64_t prevaddr = 0;
	struct global_pt_entry *p = NULL;

	uint64_t nextaddr = (uint64_t) -1;
	struct global_pt_entry *n = NULL;

	for (i = 0; i < global_pt_count; i++) {
		uint64_t addr;

		if (global_pt_entries[i].memdesc == NULL)
			continue;

		addr = global_pt_entries[i].memdesc->gpuaddr;
		if ((addr < faultaddr) && (addr > prevaddr)) {
			prevaddr = addr;
			p = &global_pt_entries[i];
		}

		if ((addr > faultaddr) && (addr < nextaddr)) {
			nextaddr = addr;
			n = &global_pt_entries[i];
		}
	}

	if (p != NULL) {
		prev->gpuaddr = p->memdesc->gpuaddr;
		prev->size = p->memdesc->size;
		prev->flags = p->memdesc->flags;
		prev->priv = p->memdesc->priv;
		prev->pid = 0;
		strlcpy(prev->name, p->name, sizeof(prev->name));
	}

	if (n != NULL) {
		next->gpuaddr = n->memdesc->gpuaddr;
		next->size = n->memdesc->size;
		next->flags = n->memdesc->flags;
		next->priv = n->memdesc->priv;
		next->pid = 0;
		strlcpy(next->name, n->name, sizeof(next->name));
	}
}

void __kgsl_get_memory_usage(struct _mem_entry *entry)
{
	kgsl_get_memory_usage(entry->name, sizeof(entry->name), entry->flags);
}

static void _get_entries(struct kgsl_process_private *private,
		uint64_t faultaddr, struct _mem_entry *prev,
		struct _mem_entry *next)
{
	int id;
	struct kgsl_mem_entry *entry;

	uint64_t prevaddr = 0;
	struct kgsl_mem_entry *p = NULL;

	uint64_t nextaddr = (uint64_t) -1;
	struct kgsl_mem_entry *n = NULL;

	idr_for_each_entry(&private->mem_idr, entry, id) {
		uint64_t addr = entry->memdesc.gpuaddr;

		if ((addr < faultaddr) && (addr > prevaddr)) {
			prevaddr = addr;
			p = entry;
		}

		if ((addr > faultaddr) && (addr < nextaddr)) {
			nextaddr = addr;
			n = entry;
		}
	}

	if (p != NULL) {
		prev->gpuaddr = p->memdesc.gpuaddr;
		prev->size = p->memdesc.size;
		prev->flags = p->memdesc.flags;
		prev->priv = p->memdesc.priv;
		prev->pending_free = p->pending_free;
		prev->pid = private->pid;
		__kgsl_get_memory_usage(prev);
	}

	if (n != NULL) {
		next->gpuaddr = n->memdesc.gpuaddr;
		next->size = n->memdesc.size;
		next->flags = n->memdesc.flags;
		next->priv = n->memdesc.priv;
		next->pending_free = n->pending_free;
		next->pid = private->pid;
		__kgsl_get_memory_usage(next);
	}
}

static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
		struct _mem_entry *preventry, struct _mem_entry *nextentry,
		struct kgsl_context *context)
{
	struct kgsl_process_private *private;

	memset(preventry, 0, sizeof(*preventry));
	memset(nextentry, 0, sizeof(*nextentry));

	/* Set the maximum possible size as an initial value */
	nextentry->gpuaddr = (uint64_t) -1;

	if (ADDR_IN_GLOBAL(mmu, faultaddr)) {
		_get_global_entries(faultaddr, preventry, nextentry);
	} else if (context) {
		private = context->proc_priv;
		spin_lock(&private->mem_lock);
		_get_entries(private, faultaddr, preventry, nextentry);
		spin_unlock(&private->mem_lock);
	}
}

static void _print_entry(struct kgsl_device *device, struct _mem_entry *entry)
{
	KGSL_LOG_DUMP(device,
		"[%016llX - %016llX] %s %s (pid = %d) (%s)\n",
		entry->gpuaddr,
		entry->gpuaddr + entry->size,
		entry->priv & KGSL_MEMDESC_GUARD_PAGE ? "(+guard)" : "",
		entry->pending_free ? "(pending free)" : "",
		entry->pid, entry->name);
}

static void _check_if_freed(struct kgsl_iommu_context *ctx,
	uint64_t addr, pid_t ptname)
{
	uint64_t gpuaddr = addr;
	uint64_t size = 0;
	uint64_t flags = 0;
	pid_t pid;

	char name[32];

	memset(name, 0, sizeof(name));

	if (kgsl_memfree_find_entry(ptname, &gpuaddr, &size, &flags, &pid)) {
		kgsl_get_memory_usage(name, sizeof(name) - 1, flags);
		KGSL_LOG_DUMP(ctx->kgsldev, "---- premature free ----\n");
		KGSL_LOG_DUMP(ctx->kgsldev,
			"[%8.8llX-%8.8llX] (%s) was already freed by pid %d\n",
			gpuaddr, gpuaddr + size, name, pid);
	}
}

static bool
kgsl_iommu_uche_overfetch(struct kgsl_process_private *private,
		uint64_t faultaddr)
{
	int id;
	struct kgsl_mem_entry *entry = NULL;

	spin_lock(&private->mem_lock);
	idr_for_each_entry(&private->mem_idr, entry, id) {
		struct kgsl_memdesc *m = &entry->memdesc;

		if ((faultaddr >= (m->gpuaddr + m->size))
				&& (faultaddr < (m->gpuaddr + m->size + 64))) {
			spin_unlock(&private->mem_lock);
			return true;
		}
	}
	spin_unlock(&private->mem_lock);
	return false;
}

/*
 * Read pagefaults where the faulting address lies within the first 64 bytes
 * of a page (UCHE line size is 64 bytes) and the fault page is preceded by a
 * valid allocation are considered likely due to UCHE overfetch and suppressed.
 */

static bool kgsl_iommu_suppress_pagefault(uint64_t faultaddr, int write,
		struct kgsl_context *context)
{
	/*
	 * If there is no context associated with the pagefault then this
	 * could be a fault on a global buffer. We do not suppress faults
	 * on global buffers as they are mainly accessed by the CP bypassing
	 * the UCHE. Also, write pagefaults are never suppressed.
	 */
	if (!context || write)
		return false;

	return kgsl_iommu_uche_overfetch(context->proc_priv, faultaddr);
}

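/*
 * kgsl_iommu_fault_handler - IOMMU page fault callback registered via
 * iommu_set_fault_handler(). It logs the faulting address, pagetable and
 * nearby allocations, suppresses likely UCHE over-fetch read faults, and,
 * when the GPUHALT fault policy is set and the transaction is stalled,
 * returns -EBUSY so the GPU stays stalled for recovery/snapshot.
 */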
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long addr, int flags, void *token)
{
	int ret = 0;
	struct kgsl_pagetable *pt = token;
	struct kgsl_mmu *mmu = pt->mmu;
	struct kgsl_iommu *iommu;
	struct kgsl_iommu_context *ctx;
	u64 ptbase;
	u32 contextidr;
	pid_t pid = 0;
	pid_t ptname;
	struct _mem_entry prev, next;
	int write;
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	struct adreno_gpudev *gpudev;
	unsigned int no_page_fault_log = 0;
	unsigned int curr_context_id = 0;
	struct kgsl_context *context;
	char *fault_type = "unknown";

	static DEFINE_RATELIMIT_STATE(_rs,
			DEFAULT_RATELIMIT_INTERVAL,
			DEFAULT_RATELIMIT_BURST);

	if (mmu == NULL)
		return ret;

	iommu = _IOMMU_PRIV(mmu);
	ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
	device = KGSL_MMU_DEVICE(mmu);
	adreno_dev = ADRENO_DEVICE(device);
	gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (pt->name == KGSL_MMU_SECURE_PT)
		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];

	/*
	 * Set the fault bits and such before any printks so that if the fault
	 * handler runs it will know it is dealing with a pagefault.
	 * Read the global current timestamp because we could be in the middle
	 * of an RB switch and hence the current RB may not be reliable, but
	 * the global one always is.
	 */
	kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));

	context = kgsl_context_get(device, curr_context_id);

	write = (flags & IOMMU_FAULT_WRITE) ? 1 : 0;
	if (flags & IOMMU_FAULT_TRANSLATION)
		fault_type = "translation";
	else if (flags & IOMMU_FAULT_PERMISSION)
		fault_type = "permission";
	else if (flags & IOMMU_FAULT_EXTERNAL)
		fault_type = "external";
	else if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
		fault_type = "transaction stalled";

	if (kgsl_iommu_suppress_pagefault(addr, write, context)) {
		iommu->pagefault_suppression_count++;
		kgsl_context_put(context);
		return ret;
	}

	if (context != NULL) {
		/* save pagefault timestamp for GFT */
		set_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, &context->priv);
		pid = context->proc_priv->pid;
	}

	ctx->fault = 1;

	if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
		&adreno_dev->ft_pf_policy) &&
		(flags & IOMMU_FAULT_TRANSACTION_STALLED)) {
		/*
		 * Turn off GPU IRQ so we don't get faults from it too.
		 * The device mutex must be held to change power state
		 */
		mutex_lock(&device->mutex);
		kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
		mutex_unlock(&device->mutex);
	}

	ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
	contextidr = KGSL_IOMMU_GET_CTX_REG(ctx, CONTEXTIDR);

	ptname = MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ?
		KGSL_MMU_GLOBAL_PT : pid;
	/*
	 * The trace needs to be logged before searching the faulting address
	 * in the free list, as the search takes quite a long time and would
	 * delay the trace unnecessarily.
	 */
	trace_kgsl_mmu_pagefault(ctx->kgsldev, addr,
			ptname, write ? "write" : "read");

	if (test_bit(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE,
		&adreno_dev->ft_pf_policy))
		no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);

	if (!no_page_fault_log && __ratelimit(&_rs)) {
		const char *api_str;

		if (context != NULL) {
			struct adreno_context *drawctxt =
					ADRENO_CONTEXT(context);

			api_str = get_api_type_str(drawctxt->type);
		} else
			api_str = "UNKNOWN";

		KGSL_MEM_CRIT(ctx->kgsldev,
			"GPU PAGE FAULT: addr = %lX pid= %d\n", addr, ptname);
		KGSL_MEM_CRIT(ctx->kgsldev,
			"context=%s ctx_type=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
			ctx->name, api_str, ptbase, contextidr,
			write ? "write" : "read", fault_type);

		if (gpudev->iommu_fault_block) {
			unsigned int fsynr1;

			fsynr1 = KGSL_IOMMU_GET_CTX_REG(ctx, FSYNR1);
			KGSL_MEM_CRIT(ctx->kgsldev,
				"FAULTING BLOCK: %s\n",
				gpudev->iommu_fault_block(adreno_dev,
					fsynr1));
		}

		/* Don't print the debug if this is a permissions fault */
		if (!(flags & IOMMU_FAULT_PERMISSION)) {
			_check_if_freed(ctx, addr, ptname);

			KGSL_LOG_DUMP(ctx->kgsldev,
				"---- nearby memory ----\n");

			_find_mem_entries(mmu, addr, &prev, &next, context);
			if (prev.gpuaddr)
				_print_entry(ctx->kgsldev, &prev);
			else
				KGSL_LOG_DUMP(ctx->kgsldev, "*EMPTY*\n");

			KGSL_LOG_DUMP(ctx->kgsldev, " <- fault @ %8.8lX\n",
				addr);

			if (next.gpuaddr != (uint64_t) -1)
				_print_entry(ctx->kgsldev, &next);
			else
				KGSL_LOG_DUMP(ctx->kgsldev, "*EMPTY*\n");
		}
	}

	/*
	 * We do not want the h/w to resume fetching data from an iommu
	 * that has faulted; this is better for debugging as it will stall
	 * the GPU and trigger a snapshot. Return EBUSY error.
	 */
	if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
		&adreno_dev->ft_pf_policy) &&
		(flags & IOMMU_FAULT_TRANSACTION_STALLED)) {
		uint32_t sctlr_val;

		ret = -EBUSY;
		/*
		 * Disable context fault interrupts
		 * as we do not clear FSR in the ISR.
		 * Will be re-enabled after FSR is cleared.
		 */
		sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
		sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
		KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);

		adreno_set_gpu_fault(adreno_dev, ADRENO_IOMMU_PAGE_FAULT);
		/* Go ahead with recovery */
		adreno_dispatcher_schedule(device);
	}

	kgsl_context_put(context);
	return ret;
}

/*
 * kgsl_iommu_disable_clk() - Disable the IOMMU clocks
 */
static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
	int j;

	atomic_dec(&iommu->clk_enable_count);

	/*
	 * Make sure the clk refcounts are good. An imbalance may
	 * cause the clocks to be off when we need them on.
	 */
	WARN_ON(atomic_read(&iommu->clk_enable_count) < 0);

	for (j = (KGSL_IOMMU_MAX_CLKS - 1); j >= 0; j--)
		if (iommu->clks[j])
			clk_disable_unprepare(iommu->clks[j]);
}

/*
 * kgsl_iommu_clk_prepare_enable() - Enable the specified IOMMU clock
 * Try 4 times to enable it and then BUG() to facilitate debug
 */
static void kgsl_iommu_clk_prepare_enable(struct clk *clk)
{
	int num_retries = 4;

	while (num_retries--) {
		if (!clk_prepare_enable(clk))
			return;
	}

	/* Failure is fatal so BUG() to facilitate debug */
	KGSL_CORE_ERR("IOMMU clock enable failed\n");
	BUG();
}

/*
 * kgsl_iommu_enable_clk() - Enable all the IOMMU clocks
 */
static void kgsl_iommu_enable_clk(struct kgsl_mmu *mmu)
{
	int j;
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);

	for (j = 0; j < KGSL_IOMMU_MAX_CLKS; j++) {
		if (iommu->clks[j])
			kgsl_iommu_clk_prepare_enable(iommu->clks[j]);
	}
	atomic_inc(&iommu->clk_enable_count);
}

/* kgsl_iommu_get_ttbr0 - Get TTBR0 setting for a pagetable */
static u64 kgsl_iommu_get_ttbr0(struct kgsl_pagetable *pt)
{
	struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;

	BUG_ON(iommu_pt == NULL);

	return iommu_pt->ttbr0;
}

static bool kgsl_iommu_pt_equal(struct kgsl_mmu *mmu,
		struct kgsl_pagetable *pt,
		u64 ttbr0)
{
	struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
	u64 domain_ttbr0;

	if (iommu_pt == NULL)
		return 0;

	domain_ttbr0 = kgsl_iommu_get_ttbr0(pt);

	return (domain_ttbr0 == ttbr0);
}

/* kgsl_iommu_get_contextidr - query CONTEXTIDR setting for a pagetable */
static u32 kgsl_iommu_get_contextidr(struct kgsl_pagetable *pt)
{
	struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;

	BUG_ON(iommu_pt == NULL);

	return iommu_pt->contextidr;
}

/*
 * kgsl_iommu_destroy_pagetable - Free up resources held by a pagetable
 * @pt - Pointer to the pagetable which is to be freed
 *
 * Return - void
 */
static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)
{
	struct kgsl_iommu_pt *iommu_pt = pt->priv;
	struct kgsl_mmu *mmu = pt->mmu;
	struct kgsl_iommu *iommu;
	struct kgsl_iommu_context *ctx;

	/*
	 * Make sure all allocations are unmapped before destroying
	 * the pagetable
	 */
	WARN_ON(!list_empty(&pt->list));

	iommu = _IOMMU_PRIV(mmu);

	if (pt->name == KGSL_MMU_SECURE_PT) {
		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
	} else {
		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
		kgsl_iommu_unmap_globals(pt);
	}

	if (iommu_pt->domain) {
		trace_kgsl_pagetable_destroy(iommu_pt->ttbr0, pt->name);

		_detach_pt(iommu_pt, ctx);

		iommu_domain_free(iommu_pt->domain);
	}

	kfree(iommu_pt);
}

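/*
 * Carve the IOMMU virtual address space into the secure, SVM and global
 * ranges for a pagetable. The 64-bit and 32-bit variants below differ only
 * in which KGSL_IOMMU_* base/end constants bound each range.
 */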
static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
		struct kgsl_pagetable *pagetable,
		struct kgsl_iommu_pt *pt)
{
	if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
		pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu);
		pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
		pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
		pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
	} else {
		pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
		pt->compat_va_end = KGSL_IOMMU_SECURE_BASE(mmu);
		pt->va_start = KGSL_IOMMU_VA_BASE64;
		pt->va_end = KGSL_IOMMU_VA_END64;
	}

	if (pagetable->name != KGSL_MMU_GLOBAL_PT &&
		pagetable->name != KGSL_MMU_SECURE_PT) {
		if (kgsl_is_compat_task()) {
			pt->svm_start = KGSL_IOMMU_SVM_BASE32;
			pt->svm_end = KGSL_IOMMU_SECURE_BASE(mmu);
		} else {
			pt->svm_start = KGSL_IOMMU_SVM_BASE64;
			pt->svm_end = KGSL_IOMMU_SVM_END64;
		}
	}
}

static void setup_32bit_pagetable(struct kgsl_mmu *mmu,
		struct kgsl_pagetable *pagetable,
		struct kgsl_iommu_pt *pt)
{
	if (mmu->secured) {
		if (pagetable->name == KGSL_MMU_SECURE_PT) {
			pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu);
			pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
			pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
			pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
		} else {
			pt->va_start = KGSL_IOMMU_SVM_BASE32;
			pt->va_end = KGSL_IOMMU_SECURE_BASE(mmu);
			pt->compat_va_start = pt->va_start;
			pt->compat_va_end = pt->va_end;
		}
	} else {
		pt->va_start = KGSL_IOMMU_SVM_BASE32;
		pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
		pt->compat_va_start = pt->va_start;
		pt->compat_va_end = pt->va_end;
	}

	if (pagetable->name != KGSL_MMU_GLOBAL_PT &&
		pagetable->name != KGSL_MMU_SECURE_PT) {
		pt->svm_start = KGSL_IOMMU_SVM_BASE32;
		pt->svm_end = KGSL_IOMMU_SVM_END32;
	}
}

static struct kgsl_iommu_pt *
_alloc_pt(struct device *dev, struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
	struct kgsl_iommu_pt *iommu_pt;
	struct bus_type *bus = kgsl_mmu_get_bus(dev);

	if (bus == NULL)
		return ERR_PTR(-ENODEV);

	iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
	if (iommu_pt == NULL)
		return ERR_PTR(-ENOMEM);

	iommu_pt->domain = iommu_domain_alloc(bus);
	if (iommu_pt->domain == NULL) {
		kfree(iommu_pt);
		return ERR_PTR(-ENODEV);
	}

	pt->pt_ops = &iommu_pt_ops;
	pt->priv = iommu_pt;
	pt->fault_addr = ~0ULL;
	iommu_pt->rbtree = RB_ROOT;

	if (MMU_FEATURE(mmu, KGSL_MMU_64BIT))
		setup_64bit_pagetable(mmu, pt, iommu_pt);
	else
		setup_32bit_pagetable(mmu, pt, iommu_pt);

	return iommu_pt;
}

static void _free_pt(struct kgsl_iommu_context *ctx, struct kgsl_pagetable *pt)
{
	struct kgsl_iommu_pt *iommu_pt = pt->priv;

	pt->pt_ops = NULL;
	pt->priv = NULL;

	if (iommu_pt == NULL)
		return;

	_detach_pt(iommu_pt, ctx);

	if (iommu_pt->domain != NULL)
		iommu_domain_free(iommu_pt->domain);
	kfree(iommu_pt);
}

void _enable_gpuhtw_llc(struct kgsl_mmu *mmu, struct kgsl_iommu_pt *iommu_pt)
{
	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int gpuhtw_llc_enable = 1;
	int ret;

	/* GPU pagetable walk LLC slice not enabled */
	if (!adreno_dev->gpuhtw_llc_slice)
		return;

	/* Domain attribute to enable system cache for GPU pagetable walks */
	ret = iommu_domain_set_attr(iommu_pt->domain,
		DOMAIN_ATTR_USE_UPSTREAM_HINT, &gpuhtw_llc_enable);
	/*
	 * Warn that the system cache will not be used for GPU
	 * pagetable walks. This is not a fatal error.
	 */
	WARN_ONCE(ret,
		"System cache not enabled for GPU pagetable walks: %d\n", ret);
}

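/*
 * program_smmu_aperture - SCM call into the secure world to open the CP
 * aperture for the given context bank. This is assumed to be required so
 * the CP can reprogram the SMMU context registers when per-process
 * pagetables are in use; it is only issued when per-process pagetables
 * are enabled (see _init_global_pt()).
 */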
static int program_smmu_aperture(unsigned int cb, unsigned int aperture_reg)
{
	struct scm_desc desc = {0};

	desc.args[0] = 0xFFFF0000 | ((aperture_reg & 0xff) << 8) | (cb & 0xff);
	desc.args[1] = 0xFFFFFFFF;
	desc.args[2] = 0xFFFFFFFF;
	desc.args[3] = 0xFFFFFFFF;
	desc.arginfo = SCM_ARGS(4);

	return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, CP_SMMU_APERTURE_ID), &desc);
}

static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
	int ret = 0;
	struct kgsl_iommu_pt *iommu_pt = NULL;
	unsigned int cb_num;
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];

	iommu_pt = _alloc_pt(ctx->dev, mmu, pt);

	if (IS_ERR(iommu_pt))
		return PTR_ERR(iommu_pt);

	if (kgsl_mmu_is_perprocess(mmu)) {
		ret = iommu_domain_set_attr(iommu_pt->domain,
				DOMAIN_ATTR_PROCID, &pt->name);
		if (ret) {
			KGSL_CORE_ERR("set DOMAIN_ATTR_PROCID failed: %d\n",
					ret);
			goto done;
		}
	}

	_enable_gpuhtw_llc(mmu, iommu_pt);

	ret = _attach_pt(iommu_pt, ctx);
	if (ret)
		goto done;

	iommu_set_fault_handler(iommu_pt->domain,
				kgsl_iommu_fault_handler, pt);

	ret = iommu_domain_get_attr(iommu_pt->domain,
				DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
	if (ret) {
		KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXT_BANK failed: %d\n",
				ret);
		goto done;
	}

	if (!MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) &&
		scm_is_call_available(SCM_SVC_MP, CP_SMMU_APERTURE_ID)) {
		ret = program_smmu_aperture(cb_num, CP_APERTURE_REG);
		if (ret) {
			pr_err("SMMU aperture programming call failed with error %d\n",
					ret);
			return ret;
		}
	}

	ctx->cb_num = cb_num;
	ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
			+ (cb_num << KGSL_IOMMU_CB_SHIFT);

	ret = iommu_domain_get_attr(iommu_pt->domain,
			DOMAIN_ATTR_TTBR0, &iommu_pt->ttbr0);
	if (ret) {
		KGSL_CORE_ERR("get DOMAIN_ATTR_TTBR0 failed: %d\n",
				ret);
		goto done;
	}
	ret = iommu_domain_get_attr(iommu_pt->domain,
			DOMAIN_ATTR_CONTEXTIDR, &iommu_pt->contextidr);
	if (ret) {
		KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXTIDR failed: %d\n",
				ret);
		goto done;
	}

	ret = kgsl_iommu_map_globals(pt);

done:
	if (ret)
		_free_pt(ctx, pt);

	return ret;
}

static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
	int ret = 0;
	struct kgsl_iommu_pt *iommu_pt = NULL;
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
	int secure_vmid = VMID_CP_PIXEL;
	unsigned int cb_num;

	if (!mmu->secured)
		return -EPERM;

	if (!MMU_FEATURE(mmu, KGSL_MMU_HYP_SECURE_ALLOC)) {
		if (!kgsl_mmu_bus_secured(ctx->dev))
			return -EPERM;
	}

	iommu_pt = _alloc_pt(ctx->dev, mmu, pt);

	if (IS_ERR(iommu_pt))
		return PTR_ERR(iommu_pt);

	ret = iommu_domain_set_attr(iommu_pt->domain,
				DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
	if (ret) {
		KGSL_CORE_ERR("set DOMAIN_ATTR_SECURE_VMID failed: %d\n", ret);
		goto done;
	}

	_enable_gpuhtw_llc(mmu, iommu_pt);

	ret = _attach_pt(iommu_pt, ctx);

	if (MMU_FEATURE(mmu, KGSL_MMU_HYP_SECURE_ALLOC))
		iommu_set_fault_handler(iommu_pt->domain,
					kgsl_iommu_fault_handler, pt);

	ret = iommu_domain_get_attr(iommu_pt->domain,
				DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
	if (ret) {
		KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXT_BANK failed: %d\n",
				ret);
		goto done;
	}

	ctx->cb_num = cb_num;
	ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
			+ (cb_num << KGSL_IOMMU_CB_SHIFT);

done:
	if (ret)
		_free_pt(ctx, pt);
	return ret;
}

static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
	int ret = 0;
	struct kgsl_iommu_pt *iommu_pt = NULL;
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
	int dynamic = 1;
	unsigned int cb_num = ctx->cb_num;

	iommu_pt = _alloc_pt(ctx->dev, mmu, pt);

	if (IS_ERR(iommu_pt))
		return PTR_ERR(iommu_pt);

	ret = iommu_domain_set_attr(iommu_pt->domain,
				DOMAIN_ATTR_DYNAMIC, &dynamic);
	if (ret) {
		KGSL_CORE_ERR("set DOMAIN_ATTR_DYNAMIC failed: %d\n", ret);
		goto done;
	}
	ret = iommu_domain_set_attr(iommu_pt->domain,
				DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
	if (ret) {
		KGSL_CORE_ERR("set DOMAIN_ATTR_CONTEXT_BANK failed: %d\n", ret);
		goto done;
	}

	ret = iommu_domain_set_attr(iommu_pt->domain,
				DOMAIN_ATTR_PROCID, &pt->name);
	if (ret) {
		KGSL_CORE_ERR("set DOMAIN_ATTR_PROCID failed: %d\n", ret);
		goto done;
	}

	_enable_gpuhtw_llc(mmu, iommu_pt);

	ret = _attach_pt(iommu_pt, ctx);
	if (ret)
		goto done;

	/* now read back the attributes needed for self programming */
	ret = iommu_domain_get_attr(iommu_pt->domain,
				DOMAIN_ATTR_TTBR0, &iommu_pt->ttbr0);
	if (ret) {
		KGSL_CORE_ERR("get DOMAIN_ATTR_TTBR0 failed: %d\n", ret);
		goto done;
	}

	ret = iommu_domain_get_attr(iommu_pt->domain,
				DOMAIN_ATTR_CONTEXTIDR, &iommu_pt->contextidr);
	if (ret) {
		KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXTIDR failed: %d\n", ret);
		goto done;
	}

	ret = kgsl_iommu_map_globals(pt);

done:
	if (ret)
		_free_pt(ctx, pt);

	return ret;
}

/* kgsl_iommu_init_pt - Set up an IOMMU pagetable */
static int kgsl_iommu_init_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
	if (pt == NULL)
		return -EINVAL;

	switch (pt->name) {
	case KGSL_MMU_GLOBAL_PT:
		return _init_global_pt(mmu, pt);

	case KGSL_MMU_SECURE_PT:
		return _init_secure_pt(mmu, pt);

	default:
		return _init_per_process_pt(mmu, pt);
	}
}

static struct kgsl_pagetable *kgsl_iommu_getpagetable(struct kgsl_mmu *mmu,
		unsigned long name)
{
	struct kgsl_pagetable *pt;

	if (!kgsl_mmu_is_perprocess(mmu) && (name != KGSL_MMU_SECURE_PT)) {
		name = KGSL_MMU_GLOBAL_PT;
		if (mmu->defaultpagetable != NULL)
			return mmu->defaultpagetable;
	}

	pt = kgsl_get_pagetable(name);
	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(mmu, name);

	return pt;
}

/*
 * kgsl_iommu_get_reg_ahbaddr - Returns the ahb address of the register
 * @mmu - Pointer to mmu structure
 * @id - The context ID of the IOMMU ctx
 * @reg - The register for which address is required
 *
 * Return - The address of register which can be used in type0 packet
 */
static unsigned int kgsl_iommu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
		int id, unsigned int reg)
{
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
	struct kgsl_iommu_context *ctx = &iommu->ctx[id];

	return ctx->gpu_offset + kgsl_iommu_reg_list[reg];
}

static void _detach_context(struct kgsl_iommu_context *ctx)
{
	struct kgsl_iommu_pt *iommu_pt;

	if (ctx->default_pt == NULL)
		return;

	iommu_pt = ctx->default_pt->priv;

	_detach_pt(iommu_pt, ctx);

	ctx->default_pt = NULL;
}

static void kgsl_iommu_close(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
	int i;

	for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
		_detach_context(&iommu->ctx[i]);

	kgsl_mmu_putpagetable(mmu->defaultpagetable);
	mmu->defaultpagetable = NULL;

	kgsl_mmu_putpagetable(mmu->securepagetable);
	mmu->securepagetable = NULL;

	if (iommu->regbase != NULL)
		iounmap(iommu->regbase);

	kgsl_sharedmem_free(&kgsl_secure_guard_page_memdesc);

	if (kgsl_guard_page != NULL) {
		__free_page(kgsl_guard_page);
		kgsl_guard_page = NULL;
	}

	if (kgsl_dummy_page != NULL) {
		__free_page(kgsl_dummy_page);
		kgsl_dummy_page = NULL;
	}

	kgsl_iommu_remove_global(mmu, &iommu->setstate);
	kgsl_sharedmem_free(&iommu->setstate);
	kgsl_cleanup_qdss_desc(mmu);
	kgsl_cleanup_qtimer_desc(mmu);
}

static int _setstate_alloc(struct kgsl_device *device,
		struct kgsl_iommu *iommu)
{
	int ret;

	kgsl_memdesc_init(device, &iommu->setstate, 0);
	ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, PAGE_SIZE);

	if (!ret) {
		/* Mark the setstate memory as read only */
		iommu->setstate.flags |= KGSL_MEMFLAGS_GPUREADONLY;

		kgsl_sharedmem_set(device, &iommu->setstate, 0, 0, PAGE_SIZE);
	}

	return ret;
}

static int kgsl_iommu_init(struct kgsl_mmu *mmu)
{
	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
	int status;

	mmu->features |= KGSL_MMU_PAGED;

	if (ctx->name == NULL) {
		KGSL_CORE_ERR("dt: gfx3d0_user context bank not found\n");
		return -EINVAL;
	}

	status = _setstate_alloc(device, iommu);
	if (status)
		return status;

	/* check requirements for per process pagetables */
	if (ctx->gpu_offset == UINT_MAX) {
		KGSL_CORE_ERR("missing qcom,gpu-offset forces global pt\n");
		mmu->features |= KGSL_MMU_GLOBAL_PAGETABLE;
	}

	if (iommu->version == 1 && iommu->micro_mmu_ctrl == UINT_MAX) {
		KGSL_CORE_ERR(
			"missing qcom,micro-mmu-control forces global pt\n");
		mmu->features |= KGSL_MMU_GLOBAL_PAGETABLE;
	}

	/* Check to see if we need to do the IOMMU sync dance */
	need_iommu_sync = of_property_read_bool(device->pdev->dev.of_node,
		"qcom,gpu-quirk-iommu-sync");

	iommu->regbase = ioremap(iommu->regstart, iommu->regsize);
	if (iommu->regbase == NULL) {
		KGSL_CORE_ERR("Could not map IOMMU registers 0x%lx:0x%x\n",
			iommu->regstart, iommu->regsize);
		status = -ENOMEM;
		goto done;
	}

	if (addr_entry_cache == NULL) {
		addr_entry_cache = KMEM_CACHE(kgsl_iommu_addr_entry, 0);
		if (addr_entry_cache == NULL) {
			status = -ENOMEM;
			goto done;
		}
	}

	kgsl_iommu_add_global(mmu, &iommu->setstate, "setstate");
	kgsl_setup_qdss_desc(device);
	kgsl_setup_qtimer_desc(device);

	if (!mmu->secured)
		goto done;

	mmu->securepagetable = kgsl_mmu_getpagetable(mmu,
				KGSL_MMU_SECURE_PT);
	if (IS_ERR(mmu->securepagetable)) {
		status = PTR_ERR(mmu->securepagetable);
		mmu->securepagetable = NULL;
	} else if (mmu->securepagetable == NULL) {
		status = -ENOMEM;
	}

done:
	if (status)
		kgsl_iommu_close(mmu);

	return status;
}

static int _setup_user_context(struct kgsl_mmu *mmu)
{
	int ret = 0;
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_iommu_pt *iommu_pt = NULL;
	unsigned int sctlr_val;

	if (mmu->defaultpagetable == NULL) {
		mmu->defaultpagetable = kgsl_mmu_getpagetable(mmu,
				KGSL_MMU_GLOBAL_PT);
		/* if we don't have a default pagetable, nothing will work */
		if (IS_ERR(mmu->defaultpagetable)) {
			ret = PTR_ERR(mmu->defaultpagetable);
			mmu->defaultpagetable = NULL;
			return ret;
		} else if (mmu->defaultpagetable == NULL) {
			return -ENOMEM;
		}
	}

	iommu_pt = mmu->defaultpagetable->priv;
	if (iommu_pt == NULL)
		return -ENODEV;

	ret = _attach_pt(iommu_pt, ctx);
	if (ret)
		return ret;

	ctx->default_pt = mmu->defaultpagetable;

	kgsl_iommu_enable_clk(mmu);

	sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);

	/*
	 * If pagefault policy is GPUHALT_ENABLE,
	 * 1) Program CFCFG to 1 to enable STALL mode
	 * 2) Program HUPCF to 0 (Stall or terminate subsequent
	 *    transactions in the presence of an outstanding fault)
	 * else
	 * 1) Program CFCFG to 0 to disable STALL mode (0=Terminate)
	 * 2) Program HUPCF to 1 (Process subsequent transactions
	 *    independently of any outstanding fault)
	 */

	if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
				&adreno_dev->ft_pf_policy)) {
		sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
		sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
	} else {
		sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
		sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
	}
	KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
	kgsl_iommu_disable_clk(mmu);

	return 0;
}

static int _setup_secure_context(struct kgsl_mmu *mmu)
{
	int ret;
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
	unsigned int cb_num;

	struct kgsl_iommu_pt *iommu_pt;

	if (ctx->dev == NULL || !mmu->secured)
		return 0;

	if (mmu->securepagetable == NULL)
		return -ENOMEM;

	iommu_pt = mmu->securepagetable->priv;

	ret = _attach_pt(iommu_pt, ctx);
	if (ret)
		goto done;

	ctx->default_pt = mmu->securepagetable;

	ret = iommu_domain_get_attr(iommu_pt->domain, DOMAIN_ATTR_CONTEXT_BANK,
					&cb_num);
	if (ret) {
		KGSL_CORE_ERR("get CONTEXT_BANK attr, err %d\n", ret);
		goto done;
	}
	ctx->cb_num = cb_num;
done:
	if (ret)
		_detach_context(ctx);
	return ret;
}

static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt);

static int kgsl_iommu_start(struct kgsl_mmu *mmu)
{
	int status;
	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);

	status = _setup_user_context(mmu);
	if (status)
		return status;

	status = _setup_secure_context(mmu);
	if (status) {
		_detach_context(&iommu->ctx[KGSL_IOMMU_CONTEXT_USER]);
		return status;
	}

	/* Make sure the hardware is programmed to the default pagetable */
	return kgsl_iommu_set_pt(mmu, mmu->defaultpagetable);
}

1702static int
1703kgsl_iommu_unmap_offset(struct kgsl_pagetable *pt,
1704 struct kgsl_memdesc *memdesc, uint64_t addr,
1705 uint64_t offset, uint64_t size)
1706{
1707 if (size == 0 || (size + offset) > kgsl_memdesc_footprint(memdesc))
1708 return -EINVAL;
1709 /*
1710 * All GPU addresses as assigned are page aligned, but some
1711 * functions perturb the gpuaddr with an offset, so apply the
1712 * mask here to make sure we have the right address.
1713 */
1714
1715 addr = PAGE_ALIGN(addr);
1716 if (addr == 0)
1717 return -EINVAL;
1718
Carter Coopera1c7cce2017-12-15 13:29:29 -07001719 return _iommu_unmap_sync_pc(pt, addr + offset, size);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001720}
1721
1722static int
1723kgsl_iommu_unmap(struct kgsl_pagetable *pt, struct kgsl_memdesc *memdesc)
1724{
1725 if (memdesc->size == 0 || memdesc->gpuaddr == 0)
1726 return -EINVAL;
1727
1728 return kgsl_iommu_unmap_offset(pt, memdesc, memdesc->gpuaddr, 0,
1729 kgsl_memdesc_footprint(memdesc));
1730}
1731
1732/**
1733 * _iommu_map_guard_page - Map iommu guard page
1734 * @pt - Pointer to kgsl pagetable structure
1735 * @memdesc - memdesc to add guard page
1736 * @gpuaddr - GPU addr of guard page
1737 * @protflags - flags for mapping
1738 *
1739 * Return 0 on success, error on map fail
1740 */
1741static int _iommu_map_guard_page(struct kgsl_pagetable *pt,
1742 struct kgsl_memdesc *memdesc,
1743 uint64_t gpuaddr,
1744 unsigned int protflags)
1745{
1746 phys_addr_t physaddr;
1747
1748 if (!kgsl_memdesc_has_guard_page(memdesc))
1749 return 0;
1750
1751 /*
1752 * Allocate guard page for secure buffers.
1753 * This has to be done after we attach a smmu pagetable.
1754 * Allocate the guard page when first secure buffer is.
1755 * mapped to save 1MB of memory if CPZ is not used.
1756 */
1757 if (kgsl_memdesc_is_secured(memdesc)) {
1758 struct scatterlist *sg;
1759 unsigned int sgp_size = pt->mmu->secure_align_mask + 1;
1760
1761 if (!kgsl_secure_guard_page_memdesc.sgt) {
1762 if (kgsl_allocate_user(KGSL_MMU_DEVICE(pt->mmu),
1763 &kgsl_secure_guard_page_memdesc,
1764 sgp_size, KGSL_MEMFLAGS_SECURE)) {
1765 KGSL_CORE_ERR(
1766 "Secure guard page alloc failed\n");
1767 return -ENOMEM;
1768 }
1769 }
1770
1771 sg = kgsl_secure_guard_page_memdesc.sgt->sgl;
1772 physaddr = page_to_phys(sg_page(sg));
1773 } else {
1774 if (kgsl_guard_page == NULL) {
1775 kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
1776 __GFP_NORETRY | __GFP_HIGHMEM);
1777 if (kgsl_guard_page == NULL)
1778 return -ENOMEM;
1779 }
1780
1781 physaddr = page_to_phys(kgsl_guard_page);
1782 }
1783
Carter Coopera1c7cce2017-12-15 13:29:29 -07001784 return _iommu_map_sync_pc(pt, gpuaddr, physaddr,
Shrenuj Bansala419c792016-10-20 14:05:11 -07001785 kgsl_memdesc_guard_page_size(memdesc),
1786 protflags & ~IOMMU_WRITE);
1787}
1788
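/*
 * _get_protection_flags() - Build the IOMMU protection flags for a buffer
 *
 * Start from read/write, no-execute and the upstream (system cache) hint,
 * then drop IOMMU_WRITE for GPU read-only buffers, add IOMMU_PRIV for
 * privileged allocations and IOMMU_CACHE for I/O coherent ones.
 */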
1789static unsigned int _get_protection_flags(struct kgsl_memdesc *memdesc)
1790{
1791 unsigned int flags = IOMMU_READ | IOMMU_WRITE |
1792 IOMMU_NOEXEC | IOMMU_USE_UPSTREAM_HINT;
1793
1794 if (memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY)
1795 flags &= ~IOMMU_WRITE;
1796
1797 if (memdesc->priv & KGSL_MEMDESC_PRIVILEGED)
1798 flags |= IOMMU_PRIV;
1799
1800 if (memdesc->flags & KGSL_MEMFLAGS_IOCOHERENT)
1801 flags |= IOMMU_CACHE;
1802
1803 return flags;
1804}
1805
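/*
 * kgsl_iommu_map() - Map a whole memory descriptor into the pagetable
 *
 * Map the scatterlist at memdesc->gpuaddr with the protection flags derived
 * from the memdesc, then map the guard page immediately after the buffer.
 * If the guard page map fails, the main mapping is torn down again.
 */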
1806static int
1807kgsl_iommu_map(struct kgsl_pagetable *pt,
1808 struct kgsl_memdesc *memdesc)
1809{
1810 int ret;
1811 uint64_t addr = memdesc->gpuaddr;
1812 uint64_t size = memdesc->size;
1813 unsigned int flags = _get_protection_flags(memdesc);
1814 struct sg_table *sgt = NULL;
1815
1816 /*
1817 * For paged memory allocated through kgsl, memdesc->pages is not NULL.
1818 * Allocate sgt here just for its map operation. Contiguous memory
1819 * already has its sgt, so no need to allocate it here.
1820 */
1821 if (memdesc->pages != NULL)
1822 sgt = kgsl_alloc_sgt_from_pages(memdesc);
1823 else
1824 sgt = memdesc->sgt;
1825
1826 if (IS_ERR(sgt))
1827 return PTR_ERR(sgt);
1828
1829 ret = _iommu_map_sg_sync_pc(pt, addr, sgt->sgl, sgt->nents, flags);
1830 if (ret)
1831 goto done;
1832
1833 ret = _iommu_map_guard_page(pt, memdesc, addr + size, flags);
1834 if (ret)
1835 _iommu_unmap_sync_pc(pt, addr, size);
1836
1837done:
1838 if (memdesc->pages != NULL)
1839 kgsl_free_sgt(sgt);
1840
1841 return ret;
1842}
1843
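/*
 * kgsl_iommu_sparse_dummy_map() - Back a sparse range with the dummy page
 *
 * Every page in the requested range is pointed at the single zeroed
 * kgsl_dummy_page and mapped read-only/no-execute, so accesses to unbound
 * sparse regions hit a harmless zero page instead of consuming real memory.
 */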
1844static int kgsl_iommu_sparse_dummy_map(struct kgsl_pagetable *pt,
1845 struct kgsl_memdesc *memdesc, uint64_t offset, uint64_t size)
1846{
1847 int ret = 0, i;
1848 struct page **pages = NULL;
1849 struct sg_table sgt;
1850 int count = size >> PAGE_SHIFT;
1851
1852 /* verify the offset is within our range */
1853 if (size + offset > memdesc->size)
1854 return -EINVAL;
1855
1856 if (kgsl_dummy_page == NULL) {
1857 kgsl_dummy_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
1858 __GFP_HIGHMEM);
1859 if (kgsl_dummy_page == NULL)
1860 return -ENOMEM;
1861 }
1862
1863 pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
1864 if (pages == NULL)
1865 return -ENOMEM;
1866
1867 for (i = 0; i < count; i++)
1868 pages[i] = kgsl_dummy_page;
1869
1870 ret = sg_alloc_table_from_pages(&sgt, pages, count,
1871 0, size, GFP_KERNEL);
1872 if (ret == 0) {
1873 ret = _iommu_map_sg_sync_pc(pt, memdesc->gpuaddr + offset,
1874 sgt.sgl, sgt.nents, IOMMU_READ | IOMMU_NOEXEC);
1875 sg_free_table(&sgt);
1876 }
1877
1878 kfree(pages);
1879
1880 return ret;
1881}
1882
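/*
 * _map_to_one_page() - Map a virtual range repeatedly to one physical page
 *
 * Used for KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS: the page at physoffset is
 * looked up (from memdesc->pages or the sgt) and a temporary page array is
 * built that repeats it across the whole range. The 16-entry inner loop
 * presumably corresponds to a 64K large page size (16 * 4K); that is an
 * inference from the code, not documented behaviour.
 */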
1883static int _map_to_one_page(struct kgsl_pagetable *pt, uint64_t addr,
1884 struct kgsl_memdesc *memdesc, uint64_t physoffset,
1885 uint64_t size, unsigned int map_flags)
1886{
1887 int ret = 0, i;
1888 int pg_sz = kgsl_memdesc_get_pagesize(memdesc);
1889 int count = size >> PAGE_SHIFT;
1890 struct page *page = NULL;
1891 struct page **pages = NULL;
1892 struct sg_page_iter sg_iter;
1893 struct sg_table sgt;
1894
1895 /* Find the backing page at the requested physical offset */
1896 if (memdesc->pages != NULL)
1897 page = memdesc->pages[physoffset >> PAGE_SHIFT];
1898 else {
1899 for_each_sg_page(memdesc->sgt->sgl, &sg_iter,
1900 memdesc->sgt->nents, physoffset >> PAGE_SHIFT) {
1901 page = sg_page_iter_page(&sg_iter);
1902 break;
1903 }
1904 }
1905
1906 if (page == NULL)
1907 return -EINVAL;
1908
1909 pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
1910 if (pages == NULL)
1911 return -ENOMEM;
1912
1913 for (i = 0; i < count; i++) {
1914 if (pg_sz != PAGE_SIZE) {
1915 struct page *tmp_page = page;
1916 int j;
1917
1918 for (j = 0; j < 16; j++, tmp_page += PAGE_SIZE)
1919 pages[i++] = tmp_page;
1920 } else
1921 pages[i] = page;
1922 }
1923
1924 ret = sg_alloc_table_from_pages(&sgt, pages, count,
1925 0, size, GFP_KERNEL);
1926 if (ret == 0) {
1927 ret = _iommu_map_sg_sync_pc(pt, addr, sgt.sgl,
1928 sgt.nents, map_flags);
1929 sg_free_table(&sgt);
1930 }
1931
1932 kfree(pages);
1933
1934 return ret;
1935}
1936
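/*
 * kgsl_iommu_map_offset() - Map part of a buffer at a caller-chosen address
 * @virtaddr, @virtoffset: Target GPU address and offset within it
 * @physoffset: Offset into the memdesc to start mapping from
 * @size: Number of bytes to map
 * @feature_flag: KGSL_SPARSE_* behaviour flags
 *
 * All inputs must be aligned to the memdesc page size. With
 * KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS the whole range is backed by a single
 * physical page via _map_to_one_page(), otherwise the scatterlist is mapped
 * starting at physoffset.
 */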
1937static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
1938 uint64_t virtaddr, uint64_t virtoffset,
1939 struct kgsl_memdesc *memdesc, uint64_t physoffset,
1940 uint64_t size, uint64_t feature_flag)
1941{
1942 int pg_sz;
1943 unsigned int protflags = _get_protection_flags(memdesc);
1944 int ret;
1945 struct sg_table *sgt = NULL;
1946
1947 pg_sz = kgsl_memdesc_get_pagesize(memdesc);
1948 if (!IS_ALIGNED(virtaddr | virtoffset | physoffset | size, pg_sz))
1949 return -EINVAL;
1950
1951 if (size == 0)
1952 return -EINVAL;
1953
1954 if (!(feature_flag & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS) &&
1955 size + physoffset > kgsl_memdesc_footprint(memdesc))
1956 return -EINVAL;
1957
1958 /*
1959 * For paged memory allocated through kgsl, memdesc->pages is not NULL.
1960 * Allocate sgt here just for its map operation. Contiguous memory
1961 * already has its sgt, so no need to allocate it here.
1962 */
1963 if (memdesc->pages != NULL)
1964 sgt = kgsl_alloc_sgt_from_pages(memdesc);
1965 else
1966 sgt = memdesc->sgt;
1967
1968 if (IS_ERR(sgt))
1969 return PTR_ERR(sgt);
1970
1971 if (feature_flag & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS)
1972 ret = _map_to_one_page(pt, virtaddr + virtoffset,
1973 memdesc, physoffset, size, protflags);
1974 else
1975 ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
1976 sgt->sgl, sgt->nents,
1977 physoffset, size, protflags);
1978
1979 if (memdesc->pages != NULL)
1980 kgsl_free_sgt(sgt);
1981
1982 return ret;
1983}
1984
1985/* This function must be called with context bank attached */
1986static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
1987{
1988 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1989 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1990 unsigned int sctlr_val;
1991
1992 if (ctx->default_pt != NULL) {
1993 kgsl_iommu_enable_clk(mmu);
1994 KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff);
1995 /*
1996 * Re-enable context fault interrupts after clearing
1997 * FSR to prevent the interrupt from firing repeatedly
1998 */
1999 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
2000 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
2001 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
2002 /*
2003 * Make sure the above register writes
2004 * are not reordered across the barrier
2005 * as we use writel_relaxed to write them
2006 */
2007 wmb();
2008 kgsl_iommu_disable_clk(mmu);
2009 }
2010}
2011
2012static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
2013{
2014 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2015 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2016
2017 if (ctx->default_pt != NULL && ctx->fault) {
2018 /*
2019 * Write 1 to RESUME.TnR to terminate the
2020 * stalled transaction.
2021 */
2022 KGSL_IOMMU_SET_CTX_REG(ctx, RESUME, 1);
2023 /*
2024 * Make sure the above register writes
2025 * are not reordered across the barrier
2026 * as we use writel_relaxed to write them
2027 */
2028 wmb();
2029 ctx->fault = 0;
2030 }
2031}
2032
2033static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
2034{
2035 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2036 int i;
2037
2038 /*
2039 * If the iommu supports retention, we don't need
2040 * to detach when stopping.
2041 */
2042 if (!MMU_FEATURE(mmu, KGSL_MMU_RETENTION)) {
2043 for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
2044 _detach_context(&iommu->ctx[i]);
2045 }
2046}
2047
2048static u64
2049kgsl_iommu_get_current_ttbr0(struct kgsl_mmu *mmu)
2050{
2051 u64 val;
2052 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2053 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2054
2055 /*
2056 * We cannot enable or disable the clocks in interrupt context, this
2057 * function is called from interrupt context if there is an axi error
2058 */
2059 if (in_interrupt())
2060 return 0;
2061
2062 if (ctx->regbase == NULL)
2063 return 0;
2064
2065 kgsl_iommu_enable_clk(mmu);
2066 val = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
2067 kgsl_iommu_disable_clk(mmu);
2068 return val;
2069}
2070
2071/*
2072 * kgsl_iommu_set_pt - Change the IOMMU pagetable of the primary context bank
2073 * @mmu - Pointer to mmu structure
2074 * @pt - Pagetable to switch to
2075 *
2076 * Set the new pagetable for the IOMMU by doing direct register writes
2077 * to the IOMMU registers through the CPU
2078 *
2079 * Return - 0 on success
2080 */
2081static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
2082{
2083 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2084 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2085 uint64_t ttbr0, temp;
2086 unsigned int contextidr;
2087 unsigned long wait_for_flush;
2088
2089 if ((pt != mmu->defaultpagetable) && !kgsl_mmu_is_perprocess(mmu))
2090 return 0;
2091
2092 kgsl_iommu_enable_clk(mmu);
2093
2094 ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pt);
2095 contextidr = kgsl_mmu_pagetable_get_contextidr(pt);
2096
2097 KGSL_IOMMU_SET_CTX_REG_Q(ctx, TTBR0, ttbr0);
2098 KGSL_IOMMU_SET_CTX_REG(ctx, CONTEXTIDR, contextidr);
2099
2100 /* memory barrier before reading TTBR0 register */
2101 mb();
2102 temp = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
2103
2104 KGSL_IOMMU_SET_CTX_REG(ctx, TLBIALL, 1);
2105 /* make sure the TLBIALL write completes before we wait */
2106 mb();
2107 /*
2108 * Wait for the flush to complete by polling the flush
2109 * status bit of the TLBSTATUS register for not more than
2110 * 2 seconds. If it still hasn't completed, just exit; at
2111 * that point the SMMU h/w may be stuck and will eventually
2112 * cause the GPU to hang or bring the system down.
2113 */
2114 wait_for_flush = jiffies + msecs_to_jiffies(2000);
2115 KGSL_IOMMU_SET_CTX_REG(ctx, TLBSYNC, 0);
2116 while (KGSL_IOMMU_GET_CTX_REG(ctx, TLBSTATUS) &
2117 (KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE)) {
2118 if (time_after(jiffies, wait_for_flush)) {
2119 KGSL_DRV_WARN(KGSL_MMU_DEVICE(mmu),
2120 "Wait limit reached for IOMMU tlb flush\n");
2121 break;
2122 }
2123 cpu_relax();
2124 }
2125
2126 kgsl_iommu_disable_clk(mmu);
2127 return 0;
2128}
2129
2130/*
2131 * kgsl_iommu_set_pf_policy() - Set the pagefault policy for IOMMU
2132 * @mmu: Pointer to mmu structure
2133 * @pf_policy: The pagefault policy to set
2134 *
2135 * Check if the new policy indicated by pf_policy is the same as the current
2136 * policy; if it is, return early, otherwise program the new policy
2137 */
2138static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
2139 unsigned long pf_policy)
2140{
2141 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2142 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2143 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
2144 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2145
2146 if ((adreno_dev->ft_pf_policy &
2147 BIT(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)) ==
2148 (pf_policy & BIT(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)))
2149 return 0;
2150
2151 /* If not attached, policy will be updated during the next attach */
2152 if (ctx->default_pt != NULL) {
2153 unsigned int sctlr_val;
2154
2155 kgsl_iommu_enable_clk(mmu);
2156
2157 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
2158
2159 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, &pf_policy)) {
2160 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
2161 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
2162 } else {
2163 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
2164 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
2165 }
2166
2167 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
2168
2169 kgsl_iommu_disable_clk(mmu);
2170 }
2171
2172 return 0;
2173}
2174
2175static struct kgsl_protected_registers *
2176kgsl_iommu_get_prot_regs(struct kgsl_mmu *mmu)
2177{
2178 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2179
2180 return &iommu->protect;
2181}
2182
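/*
 * The per-pagetable GPU virtual address space is tracked with an rbtree of
 * kgsl_iommu_addr_entry nodes keyed by base address. _find_gpuaddr(),
 * _remove_gpuaddr() and _insert_gpuaddr() below are the basic lookup,
 * erase and insert helpers; callers hold the pagetable spinlock.
 */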
2183static struct kgsl_iommu_addr_entry *_find_gpuaddr(
2184 struct kgsl_pagetable *pagetable, uint64_t gpuaddr)
2185{
2186 struct kgsl_iommu_pt *pt = pagetable->priv;
2187 struct rb_node *node = pt->rbtree.rb_node;
2188
2189 while (node != NULL) {
2190 struct kgsl_iommu_addr_entry *entry = rb_entry(node,
2191 struct kgsl_iommu_addr_entry, node);
2192
2193 if (gpuaddr < entry->base)
2194 node = node->rb_left;
2195 else if (gpuaddr > entry->base)
2196 node = node->rb_right;
2197 else
2198 return entry;
2199 }
2200
2201 return NULL;
2202}
2203
2204static int _remove_gpuaddr(struct kgsl_pagetable *pagetable,
2205 uint64_t gpuaddr)
2206{
2207 struct kgsl_iommu_pt *pt = pagetable->priv;
2208 struct kgsl_iommu_addr_entry *entry;
2209
2210 entry = _find_gpuaddr(pagetable, gpuaddr);
2211
2212 if (entry != NULL) {
2213 rb_erase(&entry->node, &pt->rbtree);
2214 kmem_cache_free(addr_entry_cache, entry);
2215 return 0;
2216 }
2217
2218 WARN(1, "Couldn't remove gpuaddr: 0x%llx\n", gpuaddr);
2219 return -ENOMEM;
2220}
2221
2222static int _insert_gpuaddr(struct kgsl_pagetable *pagetable,
2223 uint64_t gpuaddr, uint64_t size)
2224{
2225 struct kgsl_iommu_pt *pt = pagetable->priv;
2226 struct rb_node **node, *parent = NULL;
2227 struct kgsl_iommu_addr_entry *new =
2228 kmem_cache_alloc(addr_entry_cache, GFP_ATOMIC);
2229
2230 if (new == NULL)
2231 return -ENOMEM;
2232
2233 new->base = gpuaddr;
2234 new->size = size;
2235
2236 node = &pt->rbtree.rb_node;
2237
2238 while (*node != NULL) {
2239 struct kgsl_iommu_addr_entry *this;
2240
2241 parent = *node;
2242 this = rb_entry(parent, struct kgsl_iommu_addr_entry, node);
2243
2244 if (new->base < this->base)
2245 node = &parent->rb_left;
2246 else if (new->base > this->base)
2247 node = &parent->rb_right;
2248 else {
2249 /* Duplicate entry */
2250 WARN(1, "duplicate gpuaddr: 0x%llx\n", gpuaddr);
 /* Free the unused node so the duplicate path does not leak it */
 kmem_cache_free(addr_entry_cache, new);
2251 return -EEXIST;
2252 }
2253 }
2254
2255 rb_link_node(&new->node, parent, node);
2256 rb_insert_color(&new->node, &pt->rbtree);
2257
2258 return 0;
2259}
2260
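/*
 * _get_unmapped_area() - Bottom-up first-fit search of the address rbtree
 *
 * Walk the entries in ascending order, keeping 'start' aligned to the
 * requested alignment, and return the first gap between entries (or after
 * the last entry) that is at least 'size' bytes and ends below 'top'.
 */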
2261static uint64_t _get_unmapped_area(struct kgsl_pagetable *pagetable,
2262 uint64_t bottom, uint64_t top, uint64_t size,
2263 uint64_t align)
2264{
2265 struct kgsl_iommu_pt *pt = pagetable->priv;
2266 struct rb_node *node = rb_first(&pt->rbtree);
2267 uint64_t start;
2268
2269 bottom = ALIGN(bottom, align);
2270 start = bottom;
2271
2272 while (node != NULL) {
2273 uint64_t gap;
2274 struct kgsl_iommu_addr_entry *entry = rb_entry(node,
2275 struct kgsl_iommu_addr_entry, node);
2276
2277 /*
2278 * Skip any entries that are outside of the range, but make sure
2279 * to account for some that might straddle the lower bound
2280 */
2281 if (entry->base < bottom) {
2282 if (entry->base + entry->size > bottom)
2283 start = ALIGN(entry->base + entry->size, align);
2284 node = rb_next(node);
2285 continue;
2286 }
2287
2288 /* Stop if we went over the top */
2289 if (entry->base >= top)
2290 break;
2291
2292 /* Make sure there is a gap to consider */
2293 if (start < entry->base) {
2294 gap = entry->base - start;
2295
2296 if (gap >= size)
2297 return start;
2298 }
2299
2300 /* Stop if there is no more room in the region */
2301 if (entry->base + entry->size >= top)
2302 return (uint64_t) -ENOMEM;
2303
2304 /* Start the next cycle at the end of the current entry */
2305 start = ALIGN(entry->base + entry->size, align);
2306 node = rb_next(node);
2307 }
2308
2309 if (start + size <= top)
2310 return start;
2311
2312 return (uint64_t) -ENOMEM;
2313}
2314
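/*
 * _get_unmapped_area_topdown() - Top-down variant of the gap search
 *
 * Walk the rbtree from the highest entry downwards and return the highest
 * suitably aligned address at which 'size' bytes fit between two entries
 * (or between the highest entry and 'top'). Used by the SVM region search
 * so that allocations grow down from the top of the range.
 */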
2315static uint64_t _get_unmapped_area_topdown(struct kgsl_pagetable *pagetable,
2316 uint64_t bottom, uint64_t top, uint64_t size,
2317 uint64_t align)
2318{
2319 struct kgsl_iommu_pt *pt = pagetable->priv;
2320 struct rb_node *node = rb_last(&pt->rbtree);
2321 uint64_t end = top;
2322 uint64_t mask = ~(align - 1);
2323 struct kgsl_iommu_addr_entry *entry;
2324
2325 /* Make sure that the bottom is correctly aligned */
2326 bottom = ALIGN(bottom, align);
2327
2328 /* Make sure the requested size will fit in the range */
2329 if (size > (top - bottom))
2330 return -ENOMEM;
2331
2332 /* Walk back through the list to find the highest entry in the range */
2333 for (node = rb_last(&pt->rbtree); node != NULL; node = rb_prev(node)) {
2334 entry = rb_entry(node, struct kgsl_iommu_addr_entry, node);
2335 if (entry->base < top)
2336 break;
2337 }
2338
2339 while (node != NULL) {
2340 uint64_t offset;
2341
2342 entry = rb_entry(node, struct kgsl_iommu_addr_entry, node);
2343
2344 /* If the entire entry is below the range the search is over */
2345 if ((entry->base + entry->size) < bottom)
2346 break;
2347
2348 /* Get the top of the entry properly aligned */
2349 offset = ALIGN(entry->base + entry->size, align);
2350
2351 /*
2352 * Try to allocate the memory from the top of the gap,
2353 * making sure that it fits between the top of this entry and
2354 * the bottom of the previous one
2355 */
2356
2357 if ((end > size) && (offset < end)) {
2358 uint64_t chunk = (end - size) & mask;
2359
2360 if (chunk >= offset)
2361 return chunk;
2362 }
2363
2364 /*
2365 * If we get here and the current entry is outside of the range
2366 * then we are officially out of room
2367 */
2368
2369 if (entry->base < bottom)
2370 return (uint64_t) -ENOMEM;
2371
2372 /* Set the top of the gap to the current entry->base */
2373 end = entry->base;
2374
2375 /* And move on to the next lower entry */
2376 node = rb_prev(node);
2377 }
2378
2379 /* If we get here then there are no more entries in the region */
2380 if ((end > size) && (((end - size) & mask) >= bottom))
2381 return (end - size) & mask;
2382
2383 return (uint64_t) -ENOMEM;
2384}
2385
2386static uint64_t kgsl_iommu_find_svm_region(struct kgsl_pagetable *pagetable,
2387 uint64_t start, uint64_t end, uint64_t size,
2388 uint64_t alignment)
2389{
2390 uint64_t addr;
2391
2392 /* Avoid black holes */
2393 if (WARN(end <= start, "Bad search range: 0x%llx-0x%llx", start, end))
2394 return (uint64_t) -EINVAL;
2395
2396 spin_lock(&pagetable->lock);
2397 addr = _get_unmapped_area_topdown(pagetable,
2398 start, end, size, alignment);
2399 spin_unlock(&pagetable->lock);
2400 return addr;
2401}
2402
2403static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
2404 uint64_t gpuaddr, uint64_t size)
2405{
2406 int ret = -ENOMEM;
2407 struct kgsl_iommu_pt *pt = pagetable->priv;
2408 struct rb_node *node;
2409
2410 /* Make sure the requested address doesn't fall in the global range */
2411 if (ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr) ||
2412 ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr + size))
2413 return -ENOMEM;
2414
2415 spin_lock(&pagetable->lock);
2416 node = pt->rbtree.rb_node;
2417
2418 while (node != NULL) {
2419 uint64_t start, end;
2420 struct kgsl_iommu_addr_entry *entry = rb_entry(node,
2421 struct kgsl_iommu_addr_entry, node);
2422
2423 start = entry->base;
2424 end = entry->base + entry->size;
2425
2426 if (gpuaddr + size <= start)
2427 node = node->rb_left;
2428 else if (end <= gpuaddr)
2429 node = node->rb_right;
2430 else
2431 goto out;
2432 }
2433
2434 ret = _insert_gpuaddr(pagetable, gpuaddr, size);
2435out:
2436 spin_unlock(&pagetable->lock);
2437 return ret;
2438}
2439
2440
2441static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
2442 struct kgsl_memdesc *memdesc)
2443{
2444 struct kgsl_iommu_pt *pt = pagetable->priv;
2445 int ret = 0;
2446 uint64_t addr, start, end, size;
2447 unsigned int align;
2448
2449 if (WARN_ON(kgsl_memdesc_use_cpu_map(memdesc)))
2450 return -EINVAL;
2451
2452 if (memdesc->flags & KGSL_MEMFLAGS_SECURE &&
2453 pagetable->name != KGSL_MMU_SECURE_PT)
2454 return -EINVAL;
2455
2456 size = kgsl_memdesc_footprint(memdesc);
2457
2458 align = 1 << kgsl_memdesc_get_align(memdesc);
2459
2460 if (memdesc->flags & KGSL_MEMFLAGS_FORCE_32BIT) {
2461 start = pt->compat_va_start;
2462 end = pt->compat_va_end;
2463 } else {
2464 start = pt->va_start;
2465 end = pt->va_end;
2466 }
2467
2468 /*
2469 * When mapping secure buffers, adjust the start of the va range
2470 * to the end of secure global buffers.
2471 */
2472 if (kgsl_memdesc_is_secured(memdesc))
2473 start += secure_global_size;
2474
2475 spin_lock(&pagetable->lock);
2476
2477 addr = _get_unmapped_area(pagetable, start, end, size, align);
2478
2479 if (addr == (uint64_t) -ENOMEM) {
2480 ret = -ENOMEM;
2481 goto out;
2482 }
2483
2484 ret = _insert_gpuaddr(pagetable, addr, size);
2485 if (ret == 0) {
2486 memdesc->gpuaddr = addr;
2487 memdesc->pagetable = pagetable;
2488 }
2489
2490out:
2491 spin_unlock(&pagetable->lock);
2492 return ret;
2493}
2494
2495static void kgsl_iommu_put_gpuaddr(struct kgsl_memdesc *memdesc)
2496{
2497 if (memdesc->pagetable == NULL)
2498 return;
2499
2500 spin_lock(&memdesc->pagetable->lock);
2501
2502 _remove_gpuaddr(memdesc->pagetable, memdesc->gpuaddr);
2503
2504 spin_unlock(&memdesc->pagetable->lock);
2505}
2506
2507static int kgsl_iommu_svm_range(struct kgsl_pagetable *pagetable,
2508 uint64_t *lo, uint64_t *hi, uint64_t memflags)
2509{
2510 struct kgsl_iommu_pt *pt = pagetable->priv;
2511 bool gpu_compat = (memflags & KGSL_MEMFLAGS_FORCE_32BIT) != 0;
2512
2513 if (lo != NULL)
2514 *lo = gpu_compat ? pt->compat_va_start : pt->svm_start;
2515 if (hi != NULL)
2516 *hi = gpu_compat ? pt->compat_va_end : pt->svm_end;
2517
2518 return 0;
2519}
2520
2521static bool kgsl_iommu_addr_in_range(struct kgsl_pagetable *pagetable,
2522 uint64_t gpuaddr)
2523{
2524 struct kgsl_iommu_pt *pt = pagetable->priv;
2525
2526 if (gpuaddr == 0)
2527 return false;
2528
2529 if (gpuaddr >= pt->va_start && gpuaddr < pt->va_end)
2530 return true;
2531
2532 if (gpuaddr >= pt->compat_va_start && gpuaddr < pt->compat_va_end)
2533 return true;
2534
2535 if (gpuaddr >= pt->svm_start && gpuaddr < pt->svm_end)
2536 return true;
2537
2538 return false;
2539}
2540
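/*
 * Context bank names that may appear in the device tree. Only one of
 * "gfx3d_secure" and "gfx3d_secure_alt" is used for the secure context,
 * chosen by the ADRENO_QUIRK_MMU_SECURE_CB_ALT quirk in
 * _kgsl_iommu_cb_probe() below.
 */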
2541static const struct {
2542 int id;
2543 char *name;
2544} kgsl_iommu_cbs[] = {
2545 { KGSL_IOMMU_CONTEXT_USER, "gfx3d_user", },
2546 { KGSL_IOMMU_CONTEXT_SECURE, "gfx3d_secure" },
2547 { KGSL_IOMMU_CONTEXT_SECURE, "gfx3d_secure_alt" },
2548};
2549
2550static int _kgsl_iommu_cb_probe(struct kgsl_device *device,
2551 struct kgsl_iommu *iommu, struct device_node *node)
2552{
2553 struct platform_device *pdev = of_find_device_by_node(node);
2554 struct kgsl_iommu_context *ctx = NULL;
2555 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2556 int i;
2557
2558 for (i = 0; i < ARRAY_SIZE(kgsl_iommu_cbs); i++) {
2559 if (!strcmp(node->name, kgsl_iommu_cbs[i].name)) {
2560 int id = kgsl_iommu_cbs[i].id;
2561
2562 if (ADRENO_QUIRK(adreno_dev,
2563 ADRENO_QUIRK_MMU_SECURE_CB_ALT)) {
2564 if (!strcmp(node->name, "gfx3d_secure"))
2565 continue;
2566 } else if (!strcmp(node->name, "gfx3d_secure_alt"))
2567 continue;
2568
2569 ctx = &iommu->ctx[id];
2570 ctx->id = id;
2571 ctx->cb_num = -1;
2572 ctx->name = kgsl_iommu_cbs[i].name;
2573
2574 break;
2575 }
2576 }
2577
2578 if (ctx == NULL) {
2579 KGSL_CORE_ERR("dt: Unused context label %s\n", node->name);
2580 return 0;
2581 }
2582
2583 if (ctx->id == KGSL_IOMMU_CONTEXT_SECURE)
2584 device->mmu.secured = true;
2585
2586 /* this property won't be found for all context banks */
2587 if (of_property_read_u32(node, "qcom,gpu-offset", &ctx->gpu_offset))
2588 ctx->gpu_offset = UINT_MAX;
2589
2590 ctx->kgsldev = device;
2591
2592 /* With the arm-smmu driver we'll have the right device pointer here. */
2593 if (of_find_property(node, "iommus", NULL)) {
2594 ctx->dev = &pdev->dev;
2595 } else {
2596 ctx->dev = kgsl_mmu_get_ctx(ctx->name);
2597
2598 if (IS_ERR(ctx->dev))
2599 return PTR_ERR(ctx->dev);
2600 }
2601
2602 return 0;
2603}
2604
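/*
 * Optional device tree properties that translate directly into MMU feature
 * bits on device->mmu.features.
 */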
2605static const struct {
2606 char *feature;
2607 unsigned long bit;
2608} kgsl_iommu_features[] = {
2609 { "qcom,retention", KGSL_MMU_RETENTION },
2610 { "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
2611 { "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC },
2612 { "qcom,force-32bit", KGSL_MMU_FORCE_32BIT },
2613};
2614
2615static int _kgsl_iommu_probe(struct kgsl_device *device,
2616 struct device_node *node)
2617{
2618 const char *cname;
2619 struct property *prop;
2620 u32 reg_val[2];
2621 int i = 0;
2622 struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
2623 struct device_node *child;
2624 struct platform_device *pdev = of_find_device_by_node(node);
2625
2626 memset(iommu, 0, sizeof(*iommu));
2627
2628 if (of_device_is_compatible(node, "qcom,kgsl-smmu-v1"))
2629 iommu->version = 1;
2630 else
2631 iommu->version = 2;
2632
2633 if (of_property_read_u32_array(node, "reg", reg_val, 2)) {
2634 KGSL_CORE_ERR("dt: Unable to read KGSL IOMMU register range\n");
2635 return -EINVAL;
2636 }
2637 iommu->regstart = reg_val[0];
2638 iommu->regsize = reg_val[1];
2639
2640 /* Protecting the SMMU registers is mandatory */
2641 if (of_property_read_u32_array(node, "qcom,protect", reg_val, 2)) {
2642 KGSL_CORE_ERR("dt: no iommu protection range specified\n");
2643 return -EINVAL;
2644 }
2645 iommu->protect.base = reg_val[0] / sizeof(u32);
2646 iommu->protect.range = reg_val[1] / sizeof(u32);
2647
2648 of_property_for_each_string(node, "clock-names", prop, cname) {
2649 struct clk *c = devm_clk_get(&pdev->dev, cname);
2650
2651 if (IS_ERR(c)) {
2652 KGSL_CORE_ERR("dt: Couldn't get clock: %s\n", cname);
2653 return -ENODEV;
2654 }
2655 if (i >= KGSL_IOMMU_MAX_CLKS) {
2656 KGSL_CORE_ERR("dt: too many clocks defined.\n");
2657 return -EINVAL;
2658 }
2659
2660 iommu->clks[i] = c;
2661 ++i;
2662 }
2663
2664 for (i = 0; i < ARRAY_SIZE(kgsl_iommu_features); i++) {
2665 if (of_property_read_bool(node, kgsl_iommu_features[i].feature))
2666 device->mmu.features |= kgsl_iommu_features[i].bit;
2667 }
2668
2669 if (of_property_read_u32(node, "qcom,micro-mmu-control",
2670 &iommu->micro_mmu_ctrl))
2671 iommu->micro_mmu_ctrl = UINT_MAX;
2672
2673 if (of_property_read_u32(node, "qcom,secure_align_mask",
2674 &device->mmu.secure_align_mask))
2675 device->mmu.secure_align_mask = 0xfff;
2676
2677 /* Fill out the rest of the devices in the node */
2678 of_platform_populate(node, NULL, NULL, &pdev->dev);
2679
2680 for_each_child_of_node(node, child) {
2681 int ret;
2682
2683 if (!of_device_is_compatible(child, "qcom,smmu-kgsl-cb"))
2684 continue;
2685
2686 ret = _kgsl_iommu_cb_probe(device, iommu, child);
2687 if (ret)
2688 return ret;
2689 }
2690
2691 return 0;
2692}
2693
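/*
 * Table of compatible strings and their probe functions; kgsl_iommu_probe()
 * dispatches to the first one that matches a node under the KGSL device.
 */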
2694static const struct {
2695 char *compat;
2696 int (*probe)(struct kgsl_device *device, struct device_node *node);
2697} kgsl_dt_devices[] = {
2698 { "qcom,kgsl-smmu-v1", _kgsl_iommu_probe },
2699 { "qcom,kgsl-smmu-v2", _kgsl_iommu_probe },
2700};
2701
2702static int kgsl_iommu_probe(struct kgsl_device *device)
2703{
2704 int i;
2705
2706 for (i = 0; i < ARRAY_SIZE(kgsl_dt_devices); i++) {
2707 struct device_node *node;
2708
2709 node = of_find_compatible_node(device->pdev->dev.of_node,
2710 NULL, kgsl_dt_devices[i].compat);
2711
2712 if (node != NULL)
2713 return kgsl_dt_devices[i].probe(device, node);
2714 }
2715
2716 return -ENODEV;
2717}
2718
2719struct kgsl_mmu_ops kgsl_iommu_ops = {
2720 .mmu_init = kgsl_iommu_init,
2721 .mmu_close = kgsl_iommu_close,
2722 .mmu_start = kgsl_iommu_start,
2723 .mmu_stop = kgsl_iommu_stop,
2724 .mmu_set_pt = kgsl_iommu_set_pt,
2725 .mmu_clear_fsr = kgsl_iommu_clear_fsr,
2726 .mmu_get_current_ttbr0 = kgsl_iommu_get_current_ttbr0,
2727 .mmu_enable_clk = kgsl_iommu_enable_clk,
2728 .mmu_disable_clk = kgsl_iommu_disable_clk,
2729 .mmu_get_reg_ahbaddr = kgsl_iommu_get_reg_ahbaddr,
2730 .mmu_pt_equal = kgsl_iommu_pt_equal,
2731 .mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
2732 .mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
2733 .mmu_get_prot_regs = kgsl_iommu_get_prot_regs,
2734 .mmu_init_pt = kgsl_iommu_init_pt,
2735 .mmu_add_global = kgsl_iommu_add_global,
2736 .mmu_remove_global = kgsl_iommu_remove_global,
2737 .mmu_getpagetable = kgsl_iommu_getpagetable,
2738 .mmu_get_qdss_global_entry = kgsl_iommu_get_qdss_global_entry,
2739 .mmu_get_qtimer_global_entry = kgsl_iommu_get_qtimer_global_entry,
2740 .probe = kgsl_iommu_probe,
2741};
2742
2743static struct kgsl_mmu_pt_ops iommu_pt_ops = {
2744 .mmu_map = kgsl_iommu_map,
2745 .mmu_unmap = kgsl_iommu_unmap,
2746 .mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
2747 .get_ttbr0 = kgsl_iommu_get_ttbr0,
2748 .get_contextidr = kgsl_iommu_get_contextidr,
2749 .get_gpuaddr = kgsl_iommu_get_gpuaddr,
2750 .put_gpuaddr = kgsl_iommu_put_gpuaddr,
2751 .set_svm_region = kgsl_iommu_set_svm_region,
2752 .find_svm_region = kgsl_iommu_find_svm_region,
2753 .svm_range = kgsl_iommu_svm_range,
2754 .addr_in_range = kgsl_iommu_addr_in_range,
2755 .mmu_map_offset = kgsl_iommu_map_offset,
2756 .mmu_unmap_offset = kgsl_iommu_unmap_offset,
2757 .mmu_sparse_dummy_map = kgsl_iommu_sparse_dummy_map,
2758};