Jordan Crouse49967ff2019-09-09 10:41:36 -06001/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
Shrenuj Bansala419c792016-10-20 14:05:11 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/types.h>
14#include <linux/delay.h>
15#include <linux/device.h>
16#include <linux/spinlock.h>
17#include <linux/genalloc.h>
18#include <linux/slab.h>
19#include <linux/iommu.h>
20#include <linux/msm_kgsl.h>
21#include <linux/ratelimit.h>
22#include <linux/of_platform.h>
Jordan Crousef7f87a62019-09-11 08:32:15 -060023#include <linux/random.h>
Shrenuj Bansala419c792016-10-20 14:05:11 -070024#include <soc/qcom/scm.h>
25#include <soc/qcom/secure_buffer.h>
Shrenuj Bansala419c792016-10-20 14:05:11 -070026#include <linux/compat.h>
27
28#include "kgsl.h"
29#include "kgsl_device.h"
30#include "kgsl_mmu.h"
31#include "kgsl_sharedmem.h"
32#include "kgsl_iommu.h"
33#include "adreno_pm4types.h"
34#include "adreno.h"
35#include "kgsl_trace.h"
36#include "kgsl_pwrctrl.h"
37
Shrenuj Bansal9a0563b2017-06-15 14:45:15 -070038#define CP_APERTURE_REG 0
Sunil Khatri82eb1ec2018-01-09 15:28:14 +053039#define CP_SMMU_APERTURE_ID 0x1B
Shrenuj Bansal9a0563b2017-06-15 14:45:15 -070040
Shrenuj Bansala419c792016-10-20 14:05:11 -070041#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
42
Deepak Kumar756d6a92017-11-28 16:58:29 +053043#define ADDR_IN_GLOBAL(_mmu, _a) \
44 (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)) && \
45 ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) + \
46 KGSL_IOMMU_GLOBAL_MEM_SIZE)))
Shrenuj Bansala419c792016-10-20 14:05:11 -070047
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -060048/*
49 * Flag to set SMMU memory attributes required to
50 * enable system cache for GPU transactions.
51 */
52#ifndef IOMMU_USE_UPSTREAM_HINT
53#define IOMMU_USE_UPSTREAM_HINT 0
54#endif
55
Shrenuj Bansala419c792016-10-20 14:05:11 -070056static struct kgsl_mmu_pt_ops iommu_pt_ops;
57static bool need_iommu_sync;
58
59const unsigned int kgsl_iommu_reg_list[KGSL_IOMMU_REG_MAX] = {
60 0x0,/* SCTLR */
61 0x20,/* TTBR0 */
62 0x34,/* CONTEXTIDR */
63 0x58,/* FSR */
64 0x60,/* FAR_0 */
65 0x618,/* TLBIALL */
66 0x008,/* RESUME */
67 0x68,/* FSYNR0 */
68 0x6C,/* FSYNR1 */
69 0x7F0,/* TLBSYNC */
70 0x7F4,/* TLBSTATUS */
71};
72
73/*
74 * struct kgsl_iommu_addr_entry - entry in the kgsl_iommu_pt rbtree.
75 * @base: starting virtual address of the entry
76 * @size: size of the entry
77 * @node: the rbtree node
78 *
79 */
80struct kgsl_iommu_addr_entry {
81 uint64_t base;
82 uint64_t size;
83 struct rb_node node;
84};
85
86static struct kmem_cache *addr_entry_cache;
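
/*
 * Illustrative sketch only: the rbtree insert/lookup helpers that manage
 * struct kgsl_iommu_addr_entry live further down in this file, outside this
 * excerpt. A minimal insertion keyed on 'base' could look roughly like the
 * helper below; the function name is made up for illustration, the real
 * driver helpers may differ, and addr_entry_cache is only created later in
 * kgsl_iommu_init().
 */
static __maybe_unused int example_insert_addr_entry(struct rb_root *root,
		uint64_t base, uint64_t size)
{
	struct rb_node **node = &root->rb_node, *parent = NULL;
	struct kgsl_iommu_addr_entry *new =
		kmem_cache_alloc(addr_entry_cache, GFP_ATOMIC);

	if (new == NULL)
		return -ENOMEM;

	new->base = base;
	new->size = size;

	/* Walk down to the leaf position, ordered by base address */
	while (*node != NULL) {
		struct kgsl_iommu_addr_entry *this =
			rb_entry(*node, struct kgsl_iommu_addr_entry, node);

		parent = *node;
		if (new->base < this->base)
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}

	rb_link_node(&new->node, parent, node);
	rb_insert_color(&new->node, root);

	return 0;
}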
87
88/*
89 * There are certain memory allocations (ringbuffer, memstore, etc) that need to
90 * be present at the same address in every pagetable. We call these "global"
91 * pagetable entries. There are relatively few of these and they are mostly
92 * stable (defined at init time) but the actual number of globals can differ
 93 * slightly depending on the target and implementation.
94 *
95 * Here we define an array and a simple allocator to keep track of the currently
96 * active global entries. Each entry is assigned a unique address inside of a
Jordan Crouse49967ff2019-09-09 10:41:36 -060097 * MMU implementation specific "global" region. We use a simple bitmap based
98 * allocator for the region to allow for both fixed and dynamic addressing.
Shrenuj Bansala419c792016-10-20 14:05:11 -070099 */
100
101#define GLOBAL_PT_ENTRIES 32
102
103struct global_pt_entry {
104 struct kgsl_memdesc *memdesc;
105 char name[32];
106};
107
Jordan Crouse49967ff2019-09-09 10:41:36 -0600108#define GLOBAL_MAP_PAGES (KGSL_IOMMU_GLOBAL_MEM_SIZE >> PAGE_SHIFT)
109
Shrenuj Bansala419c792016-10-20 14:05:11 -0700110static struct global_pt_entry global_pt_entries[GLOBAL_PT_ENTRIES];
Jordan Crouse49967ff2019-09-09 10:41:36 -0600111static DECLARE_BITMAP(global_map, GLOBAL_MAP_PAGES);
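
/*
 * Worked example (illustrative, sizes assumed): with 4K pages and, say, an
 * 8MB global region, GLOBAL_MAP_PAGES would be 2048 bits. A global buffer
 * whose allocation lands on bit 3 of global_map is assigned the GPU address
 * KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + (3 << PAGE_SHIFT), i.e. base + 0x3000.
 * The real bookkeeping lives in kgsl_iommu_add_global() and
 * kgsl_iommu_remove_global() below; the helper here only sketches the
 * bit-to-address conversion.
 */
static __maybe_unused uint64_t example_global_bit_to_gpuaddr(
		struct kgsl_mmu *mmu, unsigned int bit)
{
	return KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + ((uint64_t) bit << PAGE_SHIFT);
}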
112
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600113static int secure_global_size;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700114static int global_pt_count;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700115static struct kgsl_memdesc gpu_qdss_desc;
Jonathan Wicks4892d8d2017-02-24 16:21:26 -0700116static struct kgsl_memdesc gpu_qtimer_desc;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700117
118void kgsl_print_global_pt_entries(struct seq_file *s)
119{
120 int i;
121
122 for (i = 0; i < global_pt_count; i++) {
123 struct kgsl_memdesc *memdesc = global_pt_entries[i].memdesc;
124
125 if (memdesc == NULL)
126 continue;
127
Hareesh Gundu1fbd9062017-11-01 18:47:45 +0530128 seq_printf(s, "0x%pK-0x%pK %16llu %s\n",
129 (uint64_t *)(uintptr_t) memdesc->gpuaddr,
130 (uint64_t *)(uintptr_t) (memdesc->gpuaddr +
131 memdesc->size - 1), memdesc->size,
132 global_pt_entries[i].name);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700133 }
134}
135
136static void kgsl_iommu_unmap_globals(struct kgsl_pagetable *pagetable)
137{
138 unsigned int i;
139
140 for (i = 0; i < global_pt_count; i++) {
141 if (global_pt_entries[i].memdesc != NULL)
142 kgsl_mmu_unmap(pagetable,
143 global_pt_entries[i].memdesc);
144 }
145}
146
147static int kgsl_iommu_map_globals(struct kgsl_pagetable *pagetable)
148{
149 unsigned int i;
150
151 for (i = 0; i < global_pt_count; i++) {
152 if (global_pt_entries[i].memdesc != NULL) {
153 int ret = kgsl_mmu_map(pagetable,
154 global_pt_entries[i].memdesc);
155
156 if (ret)
157 return ret;
158 }
159 }
160
161 return 0;
162}
163
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600164void kgsl_iommu_unmap_global_secure_pt_entry(struct kgsl_device *device,
Harshdeep Dhattae25cb62018-01-29 10:19:59 -0700165 struct kgsl_memdesc *memdesc)
Shrenuj Bansala419c792016-10-20 14:05:11 -0700166{
Harshdeep Dhattae25cb62018-01-29 10:19:59 -0700167 if (!kgsl_mmu_is_secured(&device->mmu) || memdesc == NULL)
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600168 return;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700169
Harshdeep Dhattae25cb62018-01-29 10:19:59 -0700170 /* Check if an empty memdesc got passed in */
171 if ((memdesc->gpuaddr == 0) || (memdesc->size == 0))
172 return;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700173
Harshdeep Dhattae25cb62018-01-29 10:19:59 -0700174 if (memdesc->pagetable) {
175 if (memdesc->pagetable->name == KGSL_MMU_SECURE_PT)
176 kgsl_mmu_unmap(memdesc->pagetable, memdesc);
177 }
Shrenuj Bansala419c792016-10-20 14:05:11 -0700178}
179
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600180int kgsl_iommu_map_global_secure_pt_entry(struct kgsl_device *device,
181 struct kgsl_memdesc *entry)
Shrenuj Bansala419c792016-10-20 14:05:11 -0700182{
183 int ret = 0;
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600184
185 if (!kgsl_mmu_is_secured(&device->mmu))
186 return -ENOTSUPP;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700187
188 if (entry != NULL) {
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600189 struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700190 entry->pagetable = pagetable;
Deepak Kumar756d6a92017-11-28 16:58:29 +0530191 entry->gpuaddr = KGSL_IOMMU_SECURE_BASE(&device->mmu) +
192 secure_global_size;
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600193
Shrenuj Bansala419c792016-10-20 14:05:11 -0700194 ret = kgsl_mmu_map(pagetable, entry);
Harshdeep Dhatt1f408332017-03-27 11:35:13 -0600195 if (ret == 0)
196 secure_global_size += entry->size;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700197 }
198 return ret;
199}
200
201static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
202 struct kgsl_memdesc *memdesc)
203{
204 int i;
205
206 if (memdesc->gpuaddr == 0 || !(memdesc->priv & KGSL_MEMDESC_GLOBAL))
207 return;
208
209 for (i = 0; i < global_pt_count; i++) {
210 if (global_pt_entries[i].memdesc == memdesc) {
Jordan Crouse49967ff2019-09-09 10:41:36 -0600211 u64 offset = memdesc->gpuaddr -
212 KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
213
214 bitmap_clear(global_map, offset >> PAGE_SHIFT,
215 kgsl_memdesc_footprint(memdesc) >> PAGE_SHIFT);
216
Shrenuj Bansala419c792016-10-20 14:05:11 -0700217 memdesc->gpuaddr = 0;
218 memdesc->priv &= ~KGSL_MEMDESC_GLOBAL;
219 global_pt_entries[i].memdesc = NULL;
220 return;
221 }
222 }
223}
224
225static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
226 struct kgsl_memdesc *memdesc, const char *name)
227{
Jordan Crousef7f87a62019-09-11 08:32:15 -0600228	u32 bit;
	int start = 0;
Jordan Crouse49967ff2019-09-09 10:41:36 -0600229 u64 size = kgsl_memdesc_footprint(memdesc);
230
Shrenuj Bansala419c792016-10-20 14:05:11 -0700231 if (memdesc->gpuaddr != 0)
232 return;
233
Jordan Crouse49967ff2019-09-09 10:41:36 -0600234 if (WARN_ON(global_pt_count >= GLOBAL_PT_ENTRIES))
Shrenuj Bansala419c792016-10-20 14:05:11 -0700235 return;
236
Jordan Crousef7f87a62019-09-11 08:32:15 -0600237 if (WARN_ON(size > KGSL_IOMMU_GLOBAL_MEM_SIZE))
238 return;
Jordan Crouse49967ff2019-09-09 10:41:36 -0600239
Jordan Crousef7f87a62019-09-11 08:32:15 -0600240 if (memdesc->priv & KGSL_MEMDESC_RANDOM) {
241 u32 range = GLOBAL_MAP_PAGES - (size >> PAGE_SHIFT);
242
243 start = get_random_int() % range;
244 }
245
246 while (start >= 0) {
247 bit = bitmap_find_next_zero_area(global_map, GLOBAL_MAP_PAGES,
248 start, size >> PAGE_SHIFT, 0);
249
250 if (bit < GLOBAL_MAP_PAGES)
251 break;
252
253 start--;
254 }
255
256 if (WARN_ON(start < 0))
Jordan Crouse49967ff2019-09-09 10:41:36 -0600257 return;
258
259 memdesc->gpuaddr =
260 KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + (bit << PAGE_SHIFT);
261
262 bitmap_set(global_map, bit, size >> PAGE_SHIFT);
Deepak Kumar756d6a92017-11-28 16:58:29 +0530263
Shrenuj Bansala419c792016-10-20 14:05:11 -0700264 memdesc->priv |= KGSL_MEMDESC_GLOBAL;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700265
266 global_pt_entries[global_pt_count].memdesc = memdesc;
267 strlcpy(global_pt_entries[global_pt_count].name, name,
268 sizeof(global_pt_entries[global_pt_count].name));
269 global_pt_count++;
270}
271
Shrenuj Bansala419c792016-10-20 14:05:11 -0700272struct kgsl_memdesc *kgsl_iommu_get_qdss_global_entry(void)
273{
274 return &gpu_qdss_desc;
275}
276
277static void kgsl_setup_qdss_desc(struct kgsl_device *device)
278{
279 int result = 0;
280 uint32_t gpu_qdss_entry[2];
281
282 if (!of_find_property(device->pdev->dev.of_node,
283 "qcom,gpu-qdss-stm", NULL))
284 return;
285
286 if (of_property_read_u32_array(device->pdev->dev.of_node,
287 "qcom,gpu-qdss-stm", gpu_qdss_entry, 2)) {
288 KGSL_CORE_ERR("Failed to read gpu qdss dts entry\n");
289 return;
290 }
291
Lynus Vaz90d98b52018-04-09 14:45:36 +0530292 kgsl_memdesc_init(device, &gpu_qdss_desc, 0);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700293 gpu_qdss_desc.priv = 0;
294 gpu_qdss_desc.physaddr = gpu_qdss_entry[0];
295 gpu_qdss_desc.size = gpu_qdss_entry[1];
296 gpu_qdss_desc.pagetable = NULL;
297 gpu_qdss_desc.ops = NULL;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700298 gpu_qdss_desc.hostptr = NULL;
299
300 result = memdesc_sg_dma(&gpu_qdss_desc, gpu_qdss_desc.physaddr,
301 gpu_qdss_desc.size);
302 if (result) {
303 KGSL_CORE_ERR("memdesc_sg_dma failed: %d\n", result);
304 return;
305 }
306
307 kgsl_mmu_add_global(device, &gpu_qdss_desc, "gpu-qdss");
308}
309
310static inline void kgsl_cleanup_qdss_desc(struct kgsl_mmu *mmu)
311{
312 kgsl_iommu_remove_global(mmu, &gpu_qdss_desc);
313 kgsl_sharedmem_free(&gpu_qdss_desc);
314}
315
Jonathan Wicks4892d8d2017-02-24 16:21:26 -0700316struct kgsl_memdesc *kgsl_iommu_get_qtimer_global_entry(void)
317{
318 return &gpu_qtimer_desc;
319}
320
321static void kgsl_setup_qtimer_desc(struct kgsl_device *device)
322{
323 int result = 0;
324 uint32_t gpu_qtimer_entry[2];
325
326 if (!of_find_property(device->pdev->dev.of_node,
327 "qcom,gpu-qtimer", NULL))
328 return;
329
330 if (of_property_read_u32_array(device->pdev->dev.of_node,
331 "qcom,gpu-qtimer", gpu_qtimer_entry, 2)) {
332 KGSL_CORE_ERR("Failed to read gpu qtimer dts entry\n");
333 return;
334 }
335
Lynus Vaz90d98b52018-04-09 14:45:36 +0530336 kgsl_memdesc_init(device, &gpu_qtimer_desc, 0);
Jonathan Wicks4892d8d2017-02-24 16:21:26 -0700337 gpu_qtimer_desc.priv = 0;
338 gpu_qtimer_desc.physaddr = gpu_qtimer_entry[0];
339 gpu_qtimer_desc.size = gpu_qtimer_entry[1];
340 gpu_qtimer_desc.pagetable = NULL;
341 gpu_qtimer_desc.ops = NULL;
Jonathan Wicks4892d8d2017-02-24 16:21:26 -0700342 gpu_qtimer_desc.hostptr = NULL;
343
344 result = memdesc_sg_dma(&gpu_qtimer_desc, gpu_qtimer_desc.physaddr,
345 gpu_qtimer_desc.size);
346 if (result) {
347 KGSL_CORE_ERR("memdesc_sg_dma failed: %d\n", result);
348 return;
349 }
350
351 kgsl_mmu_add_global(device, &gpu_qtimer_desc, "gpu-qtimer");
352}
353
354static inline void kgsl_cleanup_qtimer_desc(struct kgsl_mmu *mmu)
355{
356 kgsl_iommu_remove_global(mmu, &gpu_qtimer_desc);
357 kgsl_sharedmem_free(&gpu_qtimer_desc);
358}
Shrenuj Bansala419c792016-10-20 14:05:11 -0700359
360static inline void _iommu_sync_mmu_pc(bool lock)
361{
362 if (need_iommu_sync == false)
363 return;
364
365 if (lock)
366 mutex_lock(&kgsl_mmu_sync);
367 else
368 mutex_unlock(&kgsl_mmu_sync);
369}
370
371static void _detach_pt(struct kgsl_iommu_pt *iommu_pt,
372 struct kgsl_iommu_context *ctx)
373{
374 if (iommu_pt->attached) {
375 _iommu_sync_mmu_pc(true);
376 iommu_detach_device(iommu_pt->domain, ctx->dev);
377 _iommu_sync_mmu_pc(false);
378 iommu_pt->attached = false;
379 }
380}
381
382static int _attach_pt(struct kgsl_iommu_pt *iommu_pt,
383 struct kgsl_iommu_context *ctx)
384{
385 int ret;
386
387 if (iommu_pt->attached)
388 return 0;
389
390 _iommu_sync_mmu_pc(true);
391 ret = iommu_attach_device(iommu_pt->domain, ctx->dev);
392 _iommu_sync_mmu_pc(false);
393
394 if (ret == 0)
395 iommu_pt->attached = true;
396
397 return ret;
398}
399
Shrenuj Bansala419c792016-10-20 14:05:11 -0700400static int _iommu_map_sync_pc(struct kgsl_pagetable *pt,
Shrenuj Bansala419c792016-10-20 14:05:11 -0700401 uint64_t gpuaddr, phys_addr_t physaddr,
402 uint64_t size, unsigned int flags)
403{
404 struct kgsl_iommu_pt *iommu_pt = pt->priv;
405 int ret;
406
Shrenuj Bansala419c792016-10-20 14:05:11 -0700407 _iommu_sync_mmu_pc(true);
408
409 ret = iommu_map(iommu_pt->domain, gpuaddr, physaddr, size, flags);
410
411 _iommu_sync_mmu_pc(false);
412
Shrenuj Bansala419c792016-10-20 14:05:11 -0700413 if (ret) {
414 KGSL_CORE_ERR("map err: 0x%016llX, 0x%llx, 0x%x, %d\n",
415 gpuaddr, size, flags, ret);
416 return -ENODEV;
417 }
418
419 return 0;
420}
421
422static int _iommu_unmap_sync_pc(struct kgsl_pagetable *pt,
Carter Coopera1c7cce2017-12-15 13:29:29 -0700423 uint64_t addr, uint64_t size)
Shrenuj Bansala419c792016-10-20 14:05:11 -0700424{
425 struct kgsl_iommu_pt *iommu_pt = pt->priv;
426 size_t unmapped = 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700427
428 _iommu_sync_mmu_pc(true);
429
430 unmapped = iommu_unmap(iommu_pt->domain, addr, size);
431
432 _iommu_sync_mmu_pc(false);
433
Shrenuj Bansala419c792016-10-20 14:05:11 -0700434 if (unmapped != size) {
435 KGSL_CORE_ERR("unmap err: 0x%016llx, 0x%llx, %zd\n",
436 addr, size, unmapped);
437 return -ENODEV;
438 }
439
440 return 0;
441}
442
443static int _iommu_map_sg_offset_sync_pc(struct kgsl_pagetable *pt,
Carter Coopera1c7cce2017-12-15 13:29:29 -0700444 uint64_t addr, struct scatterlist *sg, int nents,
Shrenuj Bansala419c792016-10-20 14:05:11 -0700445 uint64_t offset, uint64_t size, unsigned int flags)
446{
447 struct kgsl_iommu_pt *iommu_pt = pt->priv;
448 uint64_t offset_tmp = offset;
449 uint64_t size_tmp = size;
450 size_t mapped = 0;
451 unsigned int i;
452 struct scatterlist *s;
453 phys_addr_t physaddr;
454 int ret;
455
Shrenuj Bansala419c792016-10-20 14:05:11 -0700456 _iommu_sync_mmu_pc(true);
457
458 for_each_sg(sg, s, nents, i) {
459 /* Iterate until we find the offset */
460 if (offset_tmp >= s->length) {
461 offset_tmp -= s->length;
462 continue;
463 }
464
465 /* How much mapping is needed in this sg? */
466 if (size < s->length - offset_tmp)
467 size_tmp = size;
468 else
469 size_tmp = s->length - offset_tmp;
470
471 /* Get the phys addr for the offset page */
472 if (offset_tmp != 0) {
473 physaddr = page_to_phys(nth_page(sg_page(s),
474 offset_tmp >> PAGE_SHIFT));
475 /* Reset offset_tmp */
476 offset_tmp = 0;
477 } else
478 physaddr = page_to_phys(sg_page(s));
479
480 /* Do the map for this sg */
481 ret = iommu_map(iommu_pt->domain, addr + mapped,
482 physaddr, size_tmp, flags);
483 if (ret)
484 break;
485
486 mapped += size_tmp;
487 size -= size_tmp;
488
489 if (size == 0)
490 break;
491 }
492
493 _iommu_sync_mmu_pc(false);
494
Shrenuj Bansala419c792016-10-20 14:05:11 -0700495 if (size != 0) {
496 /* Cleanup on error */
Carter Coopera1c7cce2017-12-15 13:29:29 -0700497 _iommu_unmap_sync_pc(pt, addr, mapped);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700498 KGSL_CORE_ERR(
499 "map sg offset err: 0x%016llX, %d, %x, %zd\n",
500 addr, nents, flags, mapped);
501 return -ENODEV;
502 }
503
504 return 0;
505}
506
507static int _iommu_map_sg_sync_pc(struct kgsl_pagetable *pt,
Carter Coopera1c7cce2017-12-15 13:29:29 -0700508 uint64_t addr, struct scatterlist *sg, int nents,
Shrenuj Bansala419c792016-10-20 14:05:11 -0700509 unsigned int flags)
510{
511 struct kgsl_iommu_pt *iommu_pt = pt->priv;
512 size_t mapped;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700513
514 _iommu_sync_mmu_pc(true);
515
516 mapped = iommu_map_sg(iommu_pt->domain, addr, sg, nents, flags);
517
518 _iommu_sync_mmu_pc(false);
519
Shrenuj Bansala419c792016-10-20 14:05:11 -0700520 if (mapped == 0) {
521 KGSL_CORE_ERR("map sg err: 0x%016llX, %d, %x, %zd\n",
522 addr, nents, flags, mapped);
523 return -ENODEV;
524 }
525
526 return 0;
527}
528
529/*
530 * One page allocation for a guard region to protect against over-zealous
531 * GPU pre-fetch
532 */
533
534static struct page *kgsl_guard_page;
535static struct kgsl_memdesc kgsl_secure_guard_page_memdesc;
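
/*
 * Note (illustrative summary): _iommu_map_guard_page() further down maps
 * this page directly after the end of any buffer whose memdesc carries a
 * guard page, with IOMMU_WRITE stripped from the protection flags, so an
 * over-fetch past the end of the buffer lands on a harmless read-only page
 * instead of raising a pagefault.
 */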
536
537/*
538 * The dummy page is a placeholder/extra page to be used for sparse mappings.
539 * This page will be mapped to all virtual sparse bindings that are not
540 * physically backed.
541 */
542static struct page *kgsl_dummy_page;
543
544/* These functions help find the nearest allocated memory entries on either side
 545 * of a faulting address. Knowing the nearby allocations gives us a better
 546 * idea of what should have been mapped in the faulting region.
548 */
549
550/*
551 * A local structure to make it easy to store the interesting bits for the
552 * memory entries on either side of the faulting address
553 */
554
555struct _mem_entry {
556 uint64_t gpuaddr;
557 uint64_t size;
558 uint64_t flags;
559 unsigned int priv;
560 int pending_free;
561 pid_t pid;
562 char name[32];
563};
564
565static void _get_global_entries(uint64_t faultaddr,
566 struct _mem_entry *prev,
567 struct _mem_entry *next)
568{
569 int i;
570 uint64_t prevaddr = 0;
571 struct global_pt_entry *p = NULL;
572
573 uint64_t nextaddr = (uint64_t) -1;
574 struct global_pt_entry *n = NULL;
575
576 for (i = 0; i < global_pt_count; i++) {
577 uint64_t addr;
578
579 if (global_pt_entries[i].memdesc == NULL)
580 continue;
581
582 addr = global_pt_entries[i].memdesc->gpuaddr;
583 if ((addr < faultaddr) && (addr > prevaddr)) {
584 prevaddr = addr;
585 p = &global_pt_entries[i];
586 }
587
588 if ((addr > faultaddr) && (addr < nextaddr)) {
589 nextaddr = addr;
590 n = &global_pt_entries[i];
591 }
592 }
593
594 if (p != NULL) {
595 prev->gpuaddr = p->memdesc->gpuaddr;
596 prev->size = p->memdesc->size;
597 prev->flags = p->memdesc->flags;
598 prev->priv = p->memdesc->priv;
599 prev->pid = 0;
600 strlcpy(prev->name, p->name, sizeof(prev->name));
601 }
602
603 if (n != NULL) {
604 next->gpuaddr = n->memdesc->gpuaddr;
605 next->size = n->memdesc->size;
606 next->flags = n->memdesc->flags;
607 next->priv = n->memdesc->priv;
608 next->pid = 0;
609 strlcpy(next->name, n->name, sizeof(next->name));
610 }
611}
612
613void __kgsl_get_memory_usage(struct _mem_entry *entry)
614{
615 kgsl_get_memory_usage(entry->name, sizeof(entry->name), entry->flags);
616}
617
618static void _get_entries(struct kgsl_process_private *private,
619 uint64_t faultaddr, struct _mem_entry *prev,
620 struct _mem_entry *next)
621{
622 int id;
623 struct kgsl_mem_entry *entry;
624
625 uint64_t prevaddr = 0;
626 struct kgsl_mem_entry *p = NULL;
627
628 uint64_t nextaddr = (uint64_t) -1;
629 struct kgsl_mem_entry *n = NULL;
630
631 idr_for_each_entry(&private->mem_idr, entry, id) {
632 uint64_t addr = entry->memdesc.gpuaddr;
633
634 if ((addr < faultaddr) && (addr > prevaddr)) {
635 prevaddr = addr;
636 p = entry;
637 }
638
639 if ((addr > faultaddr) && (addr < nextaddr)) {
640 nextaddr = addr;
641 n = entry;
642 }
643 }
644
645 if (p != NULL) {
646 prev->gpuaddr = p->memdesc.gpuaddr;
647 prev->size = p->memdesc.size;
648 prev->flags = p->memdesc.flags;
649 prev->priv = p->memdesc.priv;
650 prev->pending_free = p->pending_free;
Archana Sriramd66ae7b2020-10-18 23:34:04 +0530651 prev->pid = pid_nr(private->pid);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700652 __kgsl_get_memory_usage(prev);
653 }
654
655 if (n != NULL) {
656 next->gpuaddr = n->memdesc.gpuaddr;
657 next->size = n->memdesc.size;
658 next->flags = n->memdesc.flags;
659 next->priv = n->memdesc.priv;
660 next->pending_free = n->pending_free;
Archana Sriramd66ae7b2020-10-18 23:34:04 +0530661 next->pid = pid_nr(private->pid);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700662 __kgsl_get_memory_usage(next);
663 }
664}
665
666static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
667 struct _mem_entry *preventry, struct _mem_entry *nextentry,
668 struct kgsl_context *context)
669{
670 struct kgsl_process_private *private;
671
672 memset(preventry, 0, sizeof(*preventry));
673 memset(nextentry, 0, sizeof(*nextentry));
674
 675	/* Set the maximum possible address as an initial value */
676 nextentry->gpuaddr = (uint64_t) -1;
677
Deepak Kumar756d6a92017-11-28 16:58:29 +0530678 if (ADDR_IN_GLOBAL(mmu, faultaddr)) {
Shrenuj Bansala419c792016-10-20 14:05:11 -0700679 _get_global_entries(faultaddr, preventry, nextentry);
680 } else if (context) {
681 private = context->proc_priv;
682 spin_lock(&private->mem_lock);
683 _get_entries(private, faultaddr, preventry, nextentry);
684 spin_unlock(&private->mem_lock);
685 }
686}
687
688static void _print_entry(struct kgsl_device *device, struct _mem_entry *entry)
689{
690 KGSL_LOG_DUMP(device,
691 "[%016llX - %016llX] %s %s (pid = %d) (%s)\n",
692 entry->gpuaddr,
693 entry->gpuaddr + entry->size,
694 entry->priv & KGSL_MEMDESC_GUARD_PAGE ? "(+guard)" : "",
695 entry->pending_free ? "(pending free)" : "",
696 entry->pid, entry->name);
697}
698
699static void _check_if_freed(struct kgsl_iommu_context *ctx,
700 uint64_t addr, pid_t ptname)
701{
702 uint64_t gpuaddr = addr;
703 uint64_t size = 0;
704 uint64_t flags = 0;
705 pid_t pid;
706
707 char name[32];
708
709 memset(name, 0, sizeof(name));
710
711 if (kgsl_memfree_find_entry(ptname, &gpuaddr, &size, &flags, &pid)) {
712 kgsl_get_memory_usage(name, sizeof(name) - 1, flags);
713 KGSL_LOG_DUMP(ctx->kgsldev, "---- premature free ----\n");
714 KGSL_LOG_DUMP(ctx->kgsldev,
715 "[%8.8llX-%8.8llX] (%s) was already freed by pid %d\n",
716 gpuaddr, gpuaddr + size, name, pid);
717 }
718}
719
720static bool
721kgsl_iommu_uche_overfetch(struct kgsl_process_private *private,
722 uint64_t faultaddr)
723{
724 int id;
725 struct kgsl_mem_entry *entry = NULL;
726
727 spin_lock(&private->mem_lock);
728 idr_for_each_entry(&private->mem_idr, entry, id) {
729 struct kgsl_memdesc *m = &entry->memdesc;
730
731 if ((faultaddr >= (m->gpuaddr + m->size))
732 && (faultaddr < (m->gpuaddr + m->size + 64))) {
733 spin_unlock(&private->mem_lock);
734 return true;
735 }
736 }
737 spin_unlock(&private->mem_lock);
738 return false;
739}
740
741/*
742 * Read pagefaults where the faulting address lies within the first 64 bytes
743 * of a page (UCHE line size is 64 bytes) and the fault page is preceded by a
744 * valid allocation are considered likely due to UCHE overfetch and suppressed.
745 */
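
/*
 * Worked example (illustrative): if an allocation ends at GPU address
 * 0x10000 and a read fault arrives at 0x10020, the fault lies within the
 * 64-byte window [0x10000, 0x10040) just past the buffer, so
 * kgsl_iommu_uche_overfetch() treats it as a likely UCHE overfetch and the
 * fault is suppressed. A read fault at 0x10040 or beyond is still reported.
 */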
746
747static bool kgsl_iommu_suppress_pagefault(uint64_t faultaddr, int write,
748 struct kgsl_context *context)
749{
750 /*
751 * If there is no context associated with the pagefault then this
752 * could be a fault on a global buffer. We do not suppress faults
753 * on global buffers as they are mainly accessed by the CP bypassing
754 * the UCHE. Also, write pagefaults are never suppressed.
755 */
756 if (!context || write)
757 return false;
758
759 return kgsl_iommu_uche_overfetch(context->proc_priv, faultaddr);
760}
761
762static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
763 struct device *dev, unsigned long addr, int flags, void *token)
764{
765 int ret = 0;
766 struct kgsl_pagetable *pt = token;
767 struct kgsl_mmu *mmu = pt->mmu;
768 struct kgsl_iommu *iommu;
769 struct kgsl_iommu_context *ctx;
770 u64 ptbase;
771 u32 contextidr;
Lynus Vaze0a01312017-11-08 19:39:31 +0530772 pid_t pid = 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700773 pid_t ptname;
774 struct _mem_entry prev, next;
775 int write;
776 struct kgsl_device *device;
777 struct adreno_device *adreno_dev;
Lynus Vaz1fde74d2017-03-20 18:02:47 +0530778 struct adreno_gpudev *gpudev;
Shrenuj Bansala419c792016-10-20 14:05:11 -0700779 unsigned int no_page_fault_log = 0;
780 unsigned int curr_context_id = 0;
781 struct kgsl_context *context;
782 char *fault_type = "unknown";
783
784 static DEFINE_RATELIMIT_STATE(_rs,
785 DEFAULT_RATELIMIT_INTERVAL,
786 DEFAULT_RATELIMIT_BURST);
787
788 if (mmu == NULL)
789 return ret;
790
791 iommu = _IOMMU_PRIV(mmu);
792 ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
793 device = KGSL_MMU_DEVICE(mmu);
794 adreno_dev = ADRENO_DEVICE(device);
Lynus Vaz1fde74d2017-03-20 18:02:47 +0530795 gpudev = ADRENO_GPU_DEVICE(adreno_dev);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700796
797 if (pt->name == KGSL_MMU_SECURE_PT)
798 ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
799
800 /*
 801	 * Set the fault bits and related state before any printks so that the
 802	 * fault handler knows it is dealing with a pagefault. Read the global
 803	 * current timestamp because we could be in the middle of an RB switch,
 804	 * so the current RB may not be reliable, but the global one always is.
806 */
807 kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
808 KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
809
810 context = kgsl_context_get(device, curr_context_id);
811
812 write = (flags & IOMMU_FAULT_WRITE) ? 1 : 0;
813 if (flags & IOMMU_FAULT_TRANSLATION)
814 fault_type = "translation";
815 else if (flags & IOMMU_FAULT_PERMISSION)
816 fault_type = "permission";
Deepak Kumar8267e992018-04-26 11:16:55 +0530817 else if (flags & IOMMU_FAULT_EXTERNAL)
818 fault_type = "external";
819 else if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
820 fault_type = "transaction stalled";
Shrenuj Bansala419c792016-10-20 14:05:11 -0700821
822 if (kgsl_iommu_suppress_pagefault(addr, write, context)) {
823 iommu->pagefault_suppression_count++;
824 kgsl_context_put(context);
825 return ret;
826 }
827
828 if (context != NULL) {
829 /* save pagefault timestamp for GFT */
830 set_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, &context->priv);
shaohanlin869e44f2021-07-01 08:44:30 +0800831 pid = pid_nr(context->proc_priv->pid);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700832 }
833
834 ctx->fault = 1;
835
836 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
837 &adreno_dev->ft_pf_policy) &&
838 (flags & IOMMU_FAULT_TRANSACTION_STALLED)) {
839 /*
840 * Turn off GPU IRQ so we don't get faults from it too.
841 * The device mutex must be held to change power state
842 */
843 mutex_lock(&device->mutex);
844 kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
845 mutex_unlock(&device->mutex);
846 }
847
848 ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
849 contextidr = KGSL_IOMMU_GET_CTX_REG(ctx, CONTEXTIDR);
850
851 ptname = MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ?
Lynus Vaze0a01312017-11-08 19:39:31 +0530852 KGSL_MMU_GLOBAL_PT : pid;
Sunil Khatri86e95682017-01-23 17:10:32 +0530853 /*
 854	 * Log the trace before searching for the faulting address in the
 855	 * free list, since the search can take quite a long time and would
 856	 * otherwise delay the trace unnecessarily.
857 */
858 trace_kgsl_mmu_pagefault(ctx->kgsldev, addr,
859 ptname, write ? "write" : "read");
Shrenuj Bansala419c792016-10-20 14:05:11 -0700860
861 if (test_bit(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE,
862 &adreno_dev->ft_pf_policy))
863 no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
864
865 if (!no_page_fault_log && __ratelimit(&_rs)) {
Rajesh Kemisettic05883a2018-09-17 11:34:08 +0530866 const char *api_str;
867
868 if (context != NULL) {
869 struct adreno_context *drawctxt =
870 ADRENO_CONTEXT(context);
871
872 api_str = get_api_type_str(drawctxt->type);
873 } else
874 api_str = "UNKNOWN";
875
Shrenuj Bansala419c792016-10-20 14:05:11 -0700876 KGSL_MEM_CRIT(ctx->kgsldev,
877 "GPU PAGE FAULT: addr = %lX pid= %d\n", addr, ptname);
878 KGSL_MEM_CRIT(ctx->kgsldev,
Rajesh Kemisettic05883a2018-09-17 11:34:08 +0530879 "context=%s ctx_type=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
880 ctx->name, api_str, ptbase, contextidr,
Shrenuj Bansala419c792016-10-20 14:05:11 -0700881 write ? "write" : "read", fault_type);
882
Lynus Vaz1fde74d2017-03-20 18:02:47 +0530883 if (gpudev->iommu_fault_block) {
884 unsigned int fsynr1;
885
886 fsynr1 = KGSL_IOMMU_GET_CTX_REG(ctx, FSYNR1);
887 KGSL_MEM_CRIT(ctx->kgsldev,
888 "FAULTING BLOCK: %s\n",
889 gpudev->iommu_fault_block(adreno_dev,
890 fsynr1));
891 }
892
Shrenuj Bansala419c792016-10-20 14:05:11 -0700893 /* Don't print the debug if this is a permissions fault */
894 if (!(flags & IOMMU_FAULT_PERMISSION)) {
895 _check_if_freed(ctx, addr, ptname);
896
897 KGSL_LOG_DUMP(ctx->kgsldev,
898 "---- nearby memory ----\n");
899
900 _find_mem_entries(mmu, addr, &prev, &next, context);
901 if (prev.gpuaddr)
902 _print_entry(ctx->kgsldev, &prev);
903 else
904 KGSL_LOG_DUMP(ctx->kgsldev, "*EMPTY*\n");
905
906 KGSL_LOG_DUMP(ctx->kgsldev, " <- fault @ %8.8lX\n",
907 addr);
908
909 if (next.gpuaddr != (uint64_t) -1)
910 _print_entry(ctx->kgsldev, &next);
911 else
912 KGSL_LOG_DUMP(ctx->kgsldev, "*EMPTY*\n");
913 }
914 }
915
Shrenuj Bansala419c792016-10-20 14:05:11 -0700916
917 /*
918 * We do not want the h/w to resume fetching data from an iommu
 919	 * that has faulted; this is better for debugging as it will stall
920 * the GPU and trigger a snapshot. Return EBUSY error.
921 */
922 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
923 &adreno_dev->ft_pf_policy) &&
924 (flags & IOMMU_FAULT_TRANSACTION_STALLED)) {
925 uint32_t sctlr_val;
926
927 ret = -EBUSY;
928 /*
929 * Disable context fault interrupts
930 * as we do not clear FSR in the ISR.
931 * Will be re-enabled after FSR is cleared.
932 */
933 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
934 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
935 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
936
937 adreno_set_gpu_fault(adreno_dev, ADRENO_IOMMU_PAGE_FAULT);
938 /* Go ahead with recovery*/
939 adreno_dispatcher_schedule(device);
940 }
941
942 kgsl_context_put(context);
943 return ret;
944}
945
946/*
 947 * kgsl_iommu_disable_clk() - Disable the IOMMU clocks
949 */
950static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
951{
952 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
953 int j;
954
955 atomic_dec(&iommu->clk_enable_count);
956
957 /*
 958	 * Make sure the clk refcounts are good. An imbalance may
959 * cause the clocks to be off when we need them on.
960 */
961 WARN_ON(atomic_read(&iommu->clk_enable_count) < 0);
962
963 for (j = (KGSL_IOMMU_MAX_CLKS - 1); j >= 0; j--)
964 if (iommu->clks[j])
965 clk_disable_unprepare(iommu->clks[j]);
966}
967
968/*
 969 * kgsl_iommu_clk_prepare_enable - Enable the specified IOMMU clock
970 * Try 4 times to enable it and then BUG() for debug
971 */
972static void kgsl_iommu_clk_prepare_enable(struct clk *clk)
973{
974 int num_retries = 4;
975
976 while (num_retries--) {
977 if (!clk_prepare_enable(clk))
978 return;
979 }
980
981 /* Failure is fatal so BUG() to facilitate debug */
982 KGSL_CORE_ERR("IOMMU clock enable failed\n");
983 BUG();
984}
985
986/*
987 * kgsl_iommu_enable_clk - Enable iommu clocks
988 * Enable all the IOMMU clocks
989 */
990static void kgsl_iommu_enable_clk(struct kgsl_mmu *mmu)
991{
992 int j;
993 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
994
995 for (j = 0; j < KGSL_IOMMU_MAX_CLKS; j++) {
996 if (iommu->clks[j])
997 kgsl_iommu_clk_prepare_enable(iommu->clks[j]);
998 }
999 atomic_inc(&iommu->clk_enable_count);
1000}
1001
1002/* kgsl_iommu_get_ttbr0 - Get TTBR0 setting for a pagetable */
1003static u64 kgsl_iommu_get_ttbr0(struct kgsl_pagetable *pt)
1004{
1005 struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
1006
1007 BUG_ON(iommu_pt == NULL);
1008
1009 return iommu_pt->ttbr0;
1010}
1011
1012static bool kgsl_iommu_pt_equal(struct kgsl_mmu *mmu,
1013 struct kgsl_pagetable *pt,
1014 u64 ttbr0)
1015{
1016 struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
1017 u64 domain_ttbr0;
1018
1019 if (iommu_pt == NULL)
1020 return 0;
1021
1022 domain_ttbr0 = kgsl_iommu_get_ttbr0(pt);
1023
1024 return (domain_ttbr0 == ttbr0);
1025}
1026
1027/* kgsl_iommu_get_contextidr - query CONTEXTIDR setting for a pagetable */
1028static u32 kgsl_iommu_get_contextidr(struct kgsl_pagetable *pt)
1029{
1030 struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
1031
1032 BUG_ON(iommu_pt == NULL);
1033
1034 return iommu_pt->contextidr;
1035}
1036
1037/*
 1038 * kgsl_iommu_destroy_pagetable - Free up resources held by a pagetable
 1039 * @pt - Pointer to the pagetable to be freed
1040 *
1041 * Return - void
1042 */
1043static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)
1044{
1045 struct kgsl_iommu_pt *iommu_pt = pt->priv;
1046 struct kgsl_mmu *mmu = pt->mmu;
1047 struct kgsl_iommu *iommu;
1048 struct kgsl_iommu_context *ctx;
1049
1050 /*
1051 * Make sure all allocations are unmapped before destroying
1052 * the pagetable
1053 */
1054 WARN_ON(!list_empty(&pt->list));
1055
1056 iommu = _IOMMU_PRIV(mmu);
1057
1058 if (pt->name == KGSL_MMU_SECURE_PT) {
1059 ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
Shrenuj Bansala419c792016-10-20 14:05:11 -07001060 } else {
1061 ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1062 kgsl_iommu_unmap_globals(pt);
1063 }
1064
1065 if (iommu_pt->domain) {
1066 trace_kgsl_pagetable_destroy(iommu_pt->ttbr0, pt->name);
1067
1068 _detach_pt(iommu_pt, ctx);
1069
1070 iommu_domain_free(iommu_pt->domain);
1071 }
1072
1073 kfree(iommu_pt);
1074}
1075
1076static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
1077 struct kgsl_pagetable *pagetable,
1078 struct kgsl_iommu_pt *pt)
1079{
Shrenuj Bansala419c792016-10-20 14:05:11 -07001080 if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
Deepak Kumar756d6a92017-11-28 16:58:29 +05301081 pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu);
1082 pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
1083 pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
1084 pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001085 } else {
1086 pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
Deepak Kumar756d6a92017-11-28 16:58:29 +05301087 pt->compat_va_end = KGSL_IOMMU_SECURE_BASE(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001088 pt->va_start = KGSL_IOMMU_VA_BASE64;
1089 pt->va_end = KGSL_IOMMU_VA_END64;
1090 }
1091
1092 if (pagetable->name != KGSL_MMU_GLOBAL_PT &&
1093 pagetable->name != KGSL_MMU_SECURE_PT) {
Deepak Kumarcf056d12018-04-17 15:59:42 +05301094 if (kgsl_is_compat_task()) {
Shrenuj Bansala419c792016-10-20 14:05:11 -07001095 pt->svm_start = KGSL_IOMMU_SVM_BASE32;
Deepak Kumar756d6a92017-11-28 16:58:29 +05301096 pt->svm_end = KGSL_IOMMU_SECURE_BASE(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001097 } else {
1098 pt->svm_start = KGSL_IOMMU_SVM_BASE64;
1099 pt->svm_end = KGSL_IOMMU_SVM_END64;
1100 }
1101 }
1102}
1103
1104static void setup_32bit_pagetable(struct kgsl_mmu *mmu,
1105 struct kgsl_pagetable *pagetable,
1106 struct kgsl_iommu_pt *pt)
1107{
Shrenuj Bansala419c792016-10-20 14:05:11 -07001108 if (mmu->secured) {
1109 if (pagetable->name == KGSL_MMU_SECURE_PT) {
Deepak Kumar756d6a92017-11-28 16:58:29 +05301110 pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu);
1111 pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
1112 pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu);
1113 pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001114 } else {
1115 pt->va_start = KGSL_IOMMU_SVM_BASE32;
Deepak Kumar756d6a92017-11-28 16:58:29 +05301116 pt->va_end = KGSL_IOMMU_SECURE_BASE(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001117 pt->compat_va_start = pt->va_start;
1118 pt->compat_va_end = pt->va_end;
1119 }
1120 } else {
1121 pt->va_start = KGSL_IOMMU_SVM_BASE32;
Deepak Kumar756d6a92017-11-28 16:58:29 +05301122 pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001123 pt->compat_va_start = pt->va_start;
1124 pt->compat_va_end = pt->va_end;
1125 }
1126
1127 if (pagetable->name != KGSL_MMU_GLOBAL_PT &&
1128 pagetable->name != KGSL_MMU_SECURE_PT) {
1129 pt->svm_start = KGSL_IOMMU_SVM_BASE32;
1130 pt->svm_end = KGSL_IOMMU_SVM_END32;
1131 }
1132}
1133
1134
1135static struct kgsl_iommu_pt *
1136_alloc_pt(struct device *dev, struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1137{
1138 struct kgsl_iommu_pt *iommu_pt;
1139 struct bus_type *bus = kgsl_mmu_get_bus(dev);
1140
1141 if (bus == NULL)
1142 return ERR_PTR(-ENODEV);
1143
1144 iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
1145 if (iommu_pt == NULL)
1146 return ERR_PTR(-ENOMEM);
1147
1148 iommu_pt->domain = iommu_domain_alloc(bus);
1149 if (iommu_pt->domain == NULL) {
1150 kfree(iommu_pt);
1151 return ERR_PTR(-ENODEV);
1152 }
1153
1154 pt->pt_ops = &iommu_pt_ops;
1155 pt->priv = iommu_pt;
1156 pt->fault_addr = ~0ULL;
1157 iommu_pt->rbtree = RB_ROOT;
1158
1159 if (MMU_FEATURE(mmu, KGSL_MMU_64BIT))
1160 setup_64bit_pagetable(mmu, pt, iommu_pt);
1161 else
1162 setup_32bit_pagetable(mmu, pt, iommu_pt);
1163
1164
1165 return iommu_pt;
1166}
1167
1168static void _free_pt(struct kgsl_iommu_context *ctx, struct kgsl_pagetable *pt)
1169{
1170 struct kgsl_iommu_pt *iommu_pt = pt->priv;
1171
1172 pt->pt_ops = NULL;
1173 pt->priv = NULL;
1174
1175 if (iommu_pt == NULL)
1176 return;
1177
1178 _detach_pt(iommu_pt, ctx);
1179
1180 if (iommu_pt->domain != NULL)
1181 iommu_domain_free(iommu_pt->domain);
1182 kfree(iommu_pt);
1183}
1184
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07001185void _enable_gpuhtw_llc(struct kgsl_mmu *mmu, struct kgsl_iommu_pt *iommu_pt)
1186{
1187 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
1188 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1189 int gpuhtw_llc_enable = 1;
1190 int ret;
1191
1192 /* GPU pagetable walk LLC slice not enabled */
1193 if (!adreno_dev->gpuhtw_llc_slice)
1194 return;
1195
1196 /* Domain attribute to enable system cache for GPU pagetable walks */
1197 ret = iommu_domain_set_attr(iommu_pt->domain,
1198 DOMAIN_ATTR_USE_UPSTREAM_HINT, &gpuhtw_llc_enable);
1199 /*
1200 * Warn that the system cache will not be used for GPU
1201 * pagetable walks. This is not a fatal error.
1202 */
1203 WARN_ONCE(ret,
1204 "System cache not enabled for GPU pagetable walks: %d\n", ret);
1205}
1206
Shrenuj Bansal9a0563b2017-06-15 14:45:15 -07001207static int program_smmu_aperture(unsigned int cb, unsigned int aperture_reg)
1208{
1209 struct scm_desc desc = {0};
1210
1211 desc.args[0] = 0xFFFF0000 | ((aperture_reg & 0xff) << 8) | (cb & 0xff);
1212 desc.args[1] = 0xFFFFFFFF;
1213 desc.args[2] = 0xFFFFFFFF;
1214 desc.args[3] = 0xFFFFFFFF;
1215 desc.arginfo = SCM_ARGS(4);
1216
Sunil Khatri82eb1ec2018-01-09 15:28:14 +05301217 return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, CP_SMMU_APERTURE_ID), &desc);
Shrenuj Bansal9a0563b2017-06-15 14:45:15 -07001218}
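
/*
 * Worked example of the encoding above (illustrative): for context bank
 * cb = 5 and aperture_reg = CP_APERTURE_REG (0), args[0] becomes
 * 0xFFFF0000 | (0x00 << 8) | 0x05 = 0xFFFF0005. The call is dispatched as
 * SCM service SCM_SVC_MP, command CP_SMMU_APERTURE_ID (0x1B), as done from
 * _init_global_pt() below.
 */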
1219
Shrenuj Bansala419c792016-10-20 14:05:11 -07001220static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1221{
1222 int ret = 0;
1223 struct kgsl_iommu_pt *iommu_pt = NULL;
1224 unsigned int cb_num;
1225 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1226 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1227
1228 iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
1229
1230 if (IS_ERR(iommu_pt))
1231 return PTR_ERR(iommu_pt);
1232
1233 if (kgsl_mmu_is_perprocess(mmu)) {
1234 ret = iommu_domain_set_attr(iommu_pt->domain,
1235 DOMAIN_ATTR_PROCID, &pt->name);
1236 if (ret) {
1237 KGSL_CORE_ERR("set DOMAIN_ATTR_PROCID failed: %d\n",
1238 ret);
1239 goto done;
1240 }
1241 }
1242
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07001243 _enable_gpuhtw_llc(mmu, iommu_pt);
1244
Shrenuj Bansala419c792016-10-20 14:05:11 -07001245 ret = _attach_pt(iommu_pt, ctx);
1246 if (ret)
1247 goto done;
1248
1249 iommu_set_fault_handler(iommu_pt->domain,
1250 kgsl_iommu_fault_handler, pt);
1251
1252 ret = iommu_domain_get_attr(iommu_pt->domain,
1253 DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
1254 if (ret) {
Shrenuj Bansalc3b15ce2017-06-15 14:48:05 -07001255 KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXT_BANK failed: %d\n",
Shrenuj Bansala419c792016-10-20 14:05:11 -07001256 ret);
1257 goto done;
1258 }
1259
Sunil Khatri82eb1ec2018-01-09 15:28:14 +05301260 if (!MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) &&
1261 scm_is_call_available(SCM_SVC_MP, CP_SMMU_APERTURE_ID)) {
Shrenuj Bansal9a0563b2017-06-15 14:45:15 -07001262 ret = program_smmu_aperture(cb_num, CP_APERTURE_REG);
1263 if (ret) {
1264 pr_err("SMMU aperture programming call failed with error %d\n",
1265 ret);
1266 return ret;
1267 }
1268 }
1269
Shrenuj Bansala419c792016-10-20 14:05:11 -07001270 ctx->cb_num = cb_num;
1271 ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
1272 + (cb_num << KGSL_IOMMU_CB_SHIFT);
1273
1274 ret = iommu_domain_get_attr(iommu_pt->domain,
1275 DOMAIN_ATTR_TTBR0, &iommu_pt->ttbr0);
1276 if (ret) {
1277 KGSL_CORE_ERR("get DOMAIN_ATTR_TTBR0 failed: %d\n",
1278 ret);
1279 goto done;
1280 }
1281 ret = iommu_domain_get_attr(iommu_pt->domain,
1282 DOMAIN_ATTR_CONTEXTIDR, &iommu_pt->contextidr);
1283 if (ret) {
1284 KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXTIDR failed: %d\n",
1285 ret);
1286 goto done;
1287 }
1288
1289 ret = kgsl_iommu_map_globals(pt);
1290
1291done:
1292 if (ret)
1293 _free_pt(ctx, pt);
1294
1295 return ret;
1296}
1297
1298static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1299{
1300 int ret = 0;
1301 struct kgsl_iommu_pt *iommu_pt = NULL;
1302 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1303 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
1304 int secure_vmid = VMID_CP_PIXEL;
1305 unsigned int cb_num;
1306
1307 if (!mmu->secured)
1308 return -EPERM;
1309
1310 if (!MMU_FEATURE(mmu, KGSL_MMU_HYP_SECURE_ALLOC)) {
1311 if (!kgsl_mmu_bus_secured(ctx->dev))
1312 return -EPERM;
1313 }
1314
1315 iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
1316
1317 if (IS_ERR(iommu_pt))
1318 return PTR_ERR(iommu_pt);
1319
1320 ret = iommu_domain_set_attr(iommu_pt->domain,
1321 DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
1322 if (ret) {
1323 KGSL_CORE_ERR("set DOMAIN_ATTR_SECURE_VMID failed: %d\n", ret);
1324 goto done;
1325 }
1326
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07001327 _enable_gpuhtw_llc(mmu, iommu_pt);
1328
Shrenuj Bansala419c792016-10-20 14:05:11 -07001329 ret = _attach_pt(iommu_pt, ctx);
1330
1331 if (MMU_FEATURE(mmu, KGSL_MMU_HYP_SECURE_ALLOC))
1332 iommu_set_fault_handler(iommu_pt->domain,
1333 kgsl_iommu_fault_handler, pt);
1334
1335 ret = iommu_domain_get_attr(iommu_pt->domain,
1336 DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
1337 if (ret) {
 1338		KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXT_BANK failed: %d\n",
1339 ret);
1340 goto done;
1341 }
1342
1343 ctx->cb_num = cb_num;
1344 ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET
1345 + (cb_num << KGSL_IOMMU_CB_SHIFT);
1346
Shrenuj Bansala419c792016-10-20 14:05:11 -07001347done:
1348 if (ret)
1349 _free_pt(ctx, pt);
1350 return ret;
1351}
1352
1353static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1354{
1355 int ret = 0;
1356 struct kgsl_iommu_pt *iommu_pt = NULL;
1357 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1358 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1359 int dynamic = 1;
1360 unsigned int cb_num = ctx->cb_num;
1361
1362 iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
1363
1364 if (IS_ERR(iommu_pt))
1365 return PTR_ERR(iommu_pt);
1366
1367 ret = iommu_domain_set_attr(iommu_pt->domain,
1368 DOMAIN_ATTR_DYNAMIC, &dynamic);
1369 if (ret) {
1370 KGSL_CORE_ERR("set DOMAIN_ATTR_DYNAMIC failed: %d\n", ret);
1371 goto done;
1372 }
1373 ret = iommu_domain_set_attr(iommu_pt->domain,
1374 DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
1375 if (ret) {
1376 KGSL_CORE_ERR("set DOMAIN_ATTR_CONTEXT_BANK failed: %d\n", ret);
1377 goto done;
1378 }
1379
1380 ret = iommu_domain_set_attr(iommu_pt->domain,
1381 DOMAIN_ATTR_PROCID, &pt->name);
1382 if (ret) {
1383 KGSL_CORE_ERR("set DOMAIN_ATTR_PROCID failed: %d\n", ret);
1384 goto done;
1385 }
1386
Sushmita Susheelendra906564d2017-01-10 15:53:55 -07001387 _enable_gpuhtw_llc(mmu, iommu_pt);
1388
Shrenuj Bansala419c792016-10-20 14:05:11 -07001389 ret = _attach_pt(iommu_pt, ctx);
1390 if (ret)
1391 goto done;
1392
1393 /* now read back the attributes needed for self programming */
1394 ret = iommu_domain_get_attr(iommu_pt->domain,
1395 DOMAIN_ATTR_TTBR0, &iommu_pt->ttbr0);
1396 if (ret) {
1397 KGSL_CORE_ERR("get DOMAIN_ATTR_TTBR0 failed: %d\n", ret);
1398 goto done;
1399 }
1400
1401 ret = iommu_domain_get_attr(iommu_pt->domain,
1402 DOMAIN_ATTR_CONTEXTIDR, &iommu_pt->contextidr);
1403 if (ret) {
1404 KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXTIDR failed: %d\n", ret);
1405 goto done;
1406 }
1407
1408 ret = kgsl_iommu_map_globals(pt);
1409
1410done:
1411 if (ret)
1412 _free_pt(ctx, pt);
1413
1414 return ret;
1415}
1416
1417/* kgsl_iommu_init_pt - Set up an IOMMU pagetable */
1418static int kgsl_iommu_init_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
1419{
1420 if (pt == NULL)
1421 return -EINVAL;
1422
1423 switch (pt->name) {
1424 case KGSL_MMU_GLOBAL_PT:
1425 return _init_global_pt(mmu, pt);
1426
1427 case KGSL_MMU_SECURE_PT:
1428 return _init_secure_pt(mmu, pt);
1429
1430 default:
1431 return _init_per_process_pt(mmu, pt);
1432 }
1433}
1434
1435static struct kgsl_pagetable *kgsl_iommu_getpagetable(struct kgsl_mmu *mmu,
1436 unsigned long name)
1437{
1438 struct kgsl_pagetable *pt;
1439
1440 if (!kgsl_mmu_is_perprocess(mmu) && (name != KGSL_MMU_SECURE_PT)) {
1441 name = KGSL_MMU_GLOBAL_PT;
1442 if (mmu->defaultpagetable != NULL)
1443 return mmu->defaultpagetable;
1444 }
1445
1446 pt = kgsl_get_pagetable(name);
1447 if (pt == NULL)
1448 pt = kgsl_mmu_createpagetableobject(mmu, name);
1449
1450 return pt;
1451}
1452
1453/*
1454 * kgsl_iommu_get_reg_ahbaddr - Returns the ahb address of the register
1455 * @mmu - Pointer to mmu structure
1456 * @id - The context ID of the IOMMU ctx
1457 * @reg - The register for which address is required
1458 *
 1459 * Return - The address of the register, which can be used in a type0 packet
1460 */
1461static unsigned int kgsl_iommu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
1462 int id, unsigned int reg)
1463{
1464 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1465 struct kgsl_iommu_context *ctx = &iommu->ctx[id];
1466
1467 return ctx->gpu_offset + kgsl_iommu_reg_list[reg];
1468}
1469
1470static void _detach_context(struct kgsl_iommu_context *ctx)
1471{
1472 struct kgsl_iommu_pt *iommu_pt;
1473
1474 if (ctx->default_pt == NULL)
1475 return;
1476
1477 iommu_pt = ctx->default_pt->priv;
1478
1479 _detach_pt(iommu_pt, ctx);
1480
1481 ctx->default_pt = NULL;
1482}
1483
1484static void kgsl_iommu_close(struct kgsl_mmu *mmu)
1485{
1486 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1487 int i;
1488
1489 for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
1490 _detach_context(&iommu->ctx[i]);
1491
1492 kgsl_mmu_putpagetable(mmu->defaultpagetable);
1493 mmu->defaultpagetable = NULL;
1494
1495 kgsl_mmu_putpagetable(mmu->securepagetable);
1496 mmu->securepagetable = NULL;
1497
1498 if (iommu->regbase != NULL)
1499 iounmap(iommu->regbase);
1500
1501 kgsl_sharedmem_free(&kgsl_secure_guard_page_memdesc);
1502
1503 if (kgsl_guard_page != NULL) {
1504 __free_page(kgsl_guard_page);
1505 kgsl_guard_page = NULL;
1506 }
1507
1508 if (kgsl_dummy_page != NULL) {
1509 __free_page(kgsl_dummy_page);
1510 kgsl_dummy_page = NULL;
1511 }
1512
1513 kgsl_iommu_remove_global(mmu, &iommu->setstate);
1514 kgsl_sharedmem_free(&iommu->setstate);
1515 kgsl_cleanup_qdss_desc(mmu);
Jonathan Wicks4892d8d2017-02-24 16:21:26 -07001516 kgsl_cleanup_qtimer_desc(mmu);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001517}
1518
1519static int _setstate_alloc(struct kgsl_device *device,
1520 struct kgsl_iommu *iommu)
1521{
1522 int ret;
1523
Lynus Vaz90d98b52018-04-09 14:45:36 +05301524 kgsl_memdesc_init(device, &iommu->setstate, 0);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001525 ret = kgsl_sharedmem_alloc_contig(device, &iommu->setstate, PAGE_SIZE);
1526
1527 if (!ret) {
1528 /* Mark the setstate memory as read only */
1529 iommu->setstate.flags |= KGSL_MEMFLAGS_GPUREADONLY;
1530
1531 kgsl_sharedmem_set(device, &iommu->setstate, 0, 0, PAGE_SIZE);
1532 }
1533
1534 return ret;
1535}
1536
1537static int kgsl_iommu_init(struct kgsl_mmu *mmu)
1538{
1539 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
1540 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1541 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1542 int status;
1543
1544 mmu->features |= KGSL_MMU_PAGED;
1545
1546 if (ctx->name == NULL) {
1547 KGSL_CORE_ERR("dt: gfx3d0_user context bank not found\n");
1548 return -EINVAL;
1549 }
1550
1551 status = _setstate_alloc(device, iommu);
1552 if (status)
1553 return status;
1554
1555 /* check requirements for per process pagetables */
1556 if (ctx->gpu_offset == UINT_MAX) {
1557 KGSL_CORE_ERR("missing qcom,gpu-offset forces global pt\n");
1558 mmu->features |= KGSL_MMU_GLOBAL_PAGETABLE;
1559 }
1560
1561 if (iommu->version == 1 && iommu->micro_mmu_ctrl == UINT_MAX) {
1562 KGSL_CORE_ERR(
1563 "missing qcom,micro-mmu-control forces global pt\n");
1564 mmu->features |= KGSL_MMU_GLOBAL_PAGETABLE;
1565 }
1566
1567 /* Check to see if we need to do the IOMMU sync dance */
1568 need_iommu_sync = of_property_read_bool(device->pdev->dev.of_node,
1569 "qcom,gpu-quirk-iommu-sync");
1570
1571 iommu->regbase = ioremap(iommu->regstart, iommu->regsize);
1572 if (iommu->regbase == NULL) {
1573 KGSL_CORE_ERR("Could not map IOMMU registers 0x%lx:0x%x\n",
1574 iommu->regstart, iommu->regsize);
1575 status = -ENOMEM;
1576 goto done;
1577 }
1578
1579 if (addr_entry_cache == NULL) {
1580 addr_entry_cache = KMEM_CACHE(kgsl_iommu_addr_entry, 0);
1581 if (addr_entry_cache == NULL) {
1582 status = -ENOMEM;
1583 goto done;
1584 }
1585 }
1586
1587 kgsl_iommu_add_global(mmu, &iommu->setstate, "setstate");
1588 kgsl_setup_qdss_desc(device);
Jonathan Wicks4892d8d2017-02-24 16:21:26 -07001589 kgsl_setup_qtimer_desc(device);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001590
Harshdeep Dhatt1f408332017-03-27 11:35:13 -06001591 if (!mmu->secured)
1592 goto done;
1593
1594 mmu->securepagetable = kgsl_mmu_getpagetable(mmu,
1595 KGSL_MMU_SECURE_PT);
1596 if (IS_ERR(mmu->securepagetable)) {
1597 status = PTR_ERR(mmu->securepagetable);
1598 mmu->securepagetable = NULL;
1599 } else if (mmu->securepagetable == NULL) {
1600 status = -ENOMEM;
1601 }
1602
Shrenuj Bansala419c792016-10-20 14:05:11 -07001603done:
1604 if (status)
1605 kgsl_iommu_close(mmu);
1606
1607 return status;
1608}
1609
1610static int _setup_user_context(struct kgsl_mmu *mmu)
1611{
1612 int ret = 0;
1613 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1614 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
1615 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
1616 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
1617 struct kgsl_iommu_pt *iommu_pt = NULL;
1618 unsigned int sctlr_val;
1619
1620 if (mmu->defaultpagetable == NULL) {
1621 mmu->defaultpagetable = kgsl_mmu_getpagetable(mmu,
1622 KGSL_MMU_GLOBAL_PT);
1623 /* if we don't have a default pagetable, nothing will work */
1624 if (IS_ERR(mmu->defaultpagetable)) {
1625 ret = PTR_ERR(mmu->defaultpagetable);
1626 mmu->defaultpagetable = NULL;
1627 return ret;
Lynus Vaza2e31112017-04-17 18:29:58 +05301628 } else if (mmu->defaultpagetable == NULL) {
1629 return -ENOMEM;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001630 }
1631 }
1632
1633 iommu_pt = mmu->defaultpagetable->priv;
1634 if (iommu_pt == NULL)
1635 return -ENODEV;
1636
1637 ret = _attach_pt(iommu_pt, ctx);
1638 if (ret)
1639 return ret;
1640
1641 ctx->default_pt = mmu->defaultpagetable;
1642
1643 kgsl_iommu_enable_clk(mmu);
1644
1645 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
1646
1647 /*
1648 * If pagefault policy is GPUHALT_ENABLE,
1649 * 1) Program CFCFG to 1 to enable STALL mode
1650 * 2) Program HUPCF to 0 (Stall or terminate subsequent
1651 * transactions in the presence of an outstanding fault)
1652 * else
1653 * 1) Program CFCFG to 0 to disable STALL mode (0=Terminate)
1654 * 2) Program HUPCF to 1 (Process subsequent transactions
1655 * independently of any outstanding fault)
1656 */
1657
1658 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
1659 &adreno_dev->ft_pf_policy)) {
1660 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
1661 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
1662 } else {
1663 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
1664 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
1665 }
1666 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
1667 kgsl_iommu_disable_clk(mmu);
1668
1669 return 0;
1670}
1671
1672static int _setup_secure_context(struct kgsl_mmu *mmu)
1673{
1674 int ret;
1675 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1676 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
1677 unsigned int cb_num;
1678
1679 struct kgsl_iommu_pt *iommu_pt;
1680
1681 if (ctx->dev == NULL || !mmu->secured)
1682 return 0;
1683
Harshdeep Dhatt1f408332017-03-27 11:35:13 -06001684 if (mmu->securepagetable == NULL)
1685 return -ENOMEM;
1686
Shrenuj Bansala419c792016-10-20 14:05:11 -07001687 iommu_pt = mmu->securepagetable->priv;
1688
1689 ret = _attach_pt(iommu_pt, ctx);
1690 if (ret)
1691 goto done;
1692
1693 ctx->default_pt = mmu->securepagetable;
1694
1695 ret = iommu_domain_get_attr(iommu_pt->domain, DOMAIN_ATTR_CONTEXT_BANK,
1696 &cb_num);
1697 if (ret) {
1698 KGSL_CORE_ERR("get CONTEXT_BANK attr, err %d\n", ret);
1699 goto done;
1700 }
1701 ctx->cb_num = cb_num;
1702done:
1703 if (ret)
1704 _detach_context(ctx);
1705 return ret;
1706}
1707
1708static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt);
1709
1710static int kgsl_iommu_start(struct kgsl_mmu *mmu)
1711{
1712 int status;
1713 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
1714
1715 status = _setup_user_context(mmu);
1716 if (status)
1717 return status;
1718
1719 status = _setup_secure_context(mmu);
1720 if (status) {
1721 _detach_context(&iommu->ctx[KGSL_IOMMU_CONTEXT_USER]);
1722 return status;
1723 }
1724
1725 /* Make sure the hardware is programmed to the default pagetable */
1726 return kgsl_iommu_set_pt(mmu, mmu->defaultpagetable);
1727}
1728
1729static int
1730kgsl_iommu_unmap_offset(struct kgsl_pagetable *pt,
1731 struct kgsl_memdesc *memdesc, uint64_t addr,
1732 uint64_t offset, uint64_t size)
1733{
1734 if (size == 0 || (size + offset) > kgsl_memdesc_footprint(memdesc))
1735 return -EINVAL;
1736 /*
1737 * All GPU addresses as assigned are page aligned, but some
1738 * functions perturb the gpuaddr with an offset, so apply the
1739 * mask here to make sure we have the right address.
1740 */
1741
1742 addr = PAGE_ALIGN(addr);
1743 if (addr == 0)
1744 return -EINVAL;
1745
Carter Coopera1c7cce2017-12-15 13:29:29 -07001746 return _iommu_unmap_sync_pc(pt, addr + offset, size);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001747}
1748
1749static int
1750kgsl_iommu_unmap(struct kgsl_pagetable *pt, struct kgsl_memdesc *memdesc)
1751{
1752 if (memdesc->size == 0 || memdesc->gpuaddr == 0)
1753 return -EINVAL;
1754
1755 return kgsl_iommu_unmap_offset(pt, memdesc, memdesc->gpuaddr, 0,
1756 kgsl_memdesc_footprint(memdesc));
1757}
1758
1759/**
1760 * _iommu_map_guard_page - Map iommu guard page
1761 * @pt - Pointer to kgsl pagetable structure
1762 * @memdesc - memdesc to add guard page
1763 * @gpuaddr - GPU addr of guard page
1764 * @protflags - flags for mapping
1765 *
1766 * Return 0 on success, error on map fail
1767 */
1768static int _iommu_map_guard_page(struct kgsl_pagetable *pt,
1769 struct kgsl_memdesc *memdesc,
1770 uint64_t gpuaddr,
1771 unsigned int protflags)
1772{
1773 phys_addr_t physaddr;
1774
1775 if (!kgsl_memdesc_has_guard_page(memdesc))
1776 return 0;
1777
1778 /*
1779 * Allocate guard page for secure buffers.
1780	 * This has to be done after we attach an SMMU pagetable.
1781	 * Allocate the guard page when the first secure buffer is
1782	 * mapped to save 1MB of memory if CPZ is not used.
1783 */
1784 if (kgsl_memdesc_is_secured(memdesc)) {
1785 struct scatterlist *sg;
1786 unsigned int sgp_size = pt->mmu->secure_align_mask + 1;
1787
1788 if (!kgsl_secure_guard_page_memdesc.sgt) {
1789 if (kgsl_allocate_user(KGSL_MMU_DEVICE(pt->mmu),
1790 &kgsl_secure_guard_page_memdesc,
1791 sgp_size, KGSL_MEMFLAGS_SECURE)) {
1792 KGSL_CORE_ERR(
1793 "Secure guard page alloc failed\n");
1794 return -ENOMEM;
1795 }
1796 }
1797
1798 sg = kgsl_secure_guard_page_memdesc.sgt->sgl;
1799 physaddr = page_to_phys(sg_page(sg));
1800 } else {
1801 if (kgsl_guard_page == NULL) {
1802 kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
1803 __GFP_NORETRY | __GFP_HIGHMEM);
1804 if (kgsl_guard_page == NULL)
1805 return -ENOMEM;
1806 }
1807
1808 physaddr = page_to_phys(kgsl_guard_page);
1809 }
1810
Carter Coopera1c7cce2017-12-15 13:29:29 -07001811 return _iommu_map_sync_pc(pt, gpuaddr, physaddr,
Shrenuj Bansala419c792016-10-20 14:05:11 -07001812 kgsl_memdesc_guard_page_size(memdesc),
1813 protflags & ~IOMMU_WRITE);
1814}
1815
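/*
 * _get_protection_flags - Build IOMMU protection flags for a memdesc
 * @memdesc - memdesc that is going to be mapped
 *
 * Start with read/write, no-execute and the upstream hint, then adjust for
 * read-only, privileged and IO-coherent allocations.
 */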
1816static unsigned int _get_protection_flags(struct kgsl_memdesc *memdesc)
1817{
Sushmita Susheelendra7f66cf72016-09-12 11:04:43 -06001818 unsigned int flags = IOMMU_READ | IOMMU_WRITE |
1819 IOMMU_NOEXEC | IOMMU_USE_UPSTREAM_HINT;
Shrenuj Bansala419c792016-10-20 14:05:11 -07001820
1821 if (memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY)
1822 flags &= ~IOMMU_WRITE;
1823
1824 if (memdesc->priv & KGSL_MEMDESC_PRIVILEGED)
1825 flags |= IOMMU_PRIV;
1826
Shrenuj Bansal4fd6a562017-08-07 15:12:54 -07001827 if (memdesc->flags & KGSL_MEMFLAGS_IOCOHERENT)
1828 flags |= IOMMU_CACHE;
1829
Shrenuj Bansala419c792016-10-20 14:05:11 -07001830 return flags;
1831}
1832
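/*
 * kgsl_iommu_map - Map a memdesc into the pagetable
 * @pt - Pointer to kgsl pagetable structure
 * @memdesc - memdesc to map at memdesc->gpuaddr
 *
 * Map the scatterlist of the memdesc followed by its guard page (if any).
 * Return 0 on success, error on map fail
 */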
1833static int
1834kgsl_iommu_map(struct kgsl_pagetable *pt,
1835 struct kgsl_memdesc *memdesc)
1836{
1837 int ret;
1838 uint64_t addr = memdesc->gpuaddr;
1839 uint64_t size = memdesc->size;
1840 unsigned int flags = _get_protection_flags(memdesc);
1841 struct sg_table *sgt = NULL;
1842
1843 /*
1844 * For paged memory allocated through kgsl, memdesc->pages is not NULL.
1845 * Allocate sgt here just for its map operation. Contiguous memory
1846 * already has its sgt, so no need to allocate it here.
1847 */
1848 if (memdesc->pages != NULL)
1849 sgt = kgsl_alloc_sgt_from_pages(memdesc);
1850 else
1851 sgt = memdesc->sgt;
1852
1853 if (IS_ERR(sgt))
1854 return PTR_ERR(sgt);
1855
Carter Coopera1c7cce2017-12-15 13:29:29 -07001856 ret = _iommu_map_sg_sync_pc(pt, addr, sgt->sgl, sgt->nents, flags);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001857 if (ret)
1858 goto done;
1859
1860 ret = _iommu_map_guard_page(pt, memdesc, addr + size, flags);
1861 if (ret)
Carter Coopera1c7cce2017-12-15 13:29:29 -07001862 _iommu_unmap_sync_pc(pt, addr, size);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001863
1864done:
1865 if (memdesc->pages != NULL)
1866 kgsl_free_sgt(sgt);
1867
1868 return ret;
1869}
1870
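/*
 * kgsl_iommu_sparse_dummy_map - Map a sparse region to the dummy page
 * @pt - Pointer to kgsl pagetable structure
 * @memdesc - sparse memdesc that owns the virtual range
 * @offset - offset into the memdesc to start mapping at
 * @size - size of the region to map
 *
 * Point every page in the region at the shared dummy page with read-only,
 * no-execute permissions.
 * Return 0 on success, error on map fail
 */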
1871static int kgsl_iommu_sparse_dummy_map(struct kgsl_pagetable *pt,
1872 struct kgsl_memdesc *memdesc, uint64_t offset, uint64_t size)
1873{
1874 int ret = 0, i;
1875 struct page **pages = NULL;
1876 struct sg_table sgt;
1877 int count = size >> PAGE_SHIFT;
1878
1879 /* verify the offset is within our range */
1880 if (size + offset > memdesc->size)
1881 return -EINVAL;
1882
1883 if (kgsl_dummy_page == NULL) {
1884 kgsl_dummy_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
1885 __GFP_HIGHMEM);
1886 if (kgsl_dummy_page == NULL)
1887 return -ENOMEM;
1888 }
1889
1890 pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
1891 if (pages == NULL)
1892 return -ENOMEM;
1893
1894 for (i = 0; i < count; i++)
1895 pages[i] = kgsl_dummy_page;
1896
1897 ret = sg_alloc_table_from_pages(&sgt, pages, count,
1898 0, size, GFP_KERNEL);
1899 if (ret == 0) {
1900 ret = _iommu_map_sg_sync_pc(pt, memdesc->gpuaddr + offset,
Carter Coopera1c7cce2017-12-15 13:29:29 -07001901 sgt.sgl, sgt.nents, IOMMU_READ | IOMMU_NOEXEC);
Shrenuj Bansala419c792016-10-20 14:05:11 -07001902 sg_free_table(&sgt);
1903 }
1904
1905 kfree(pages);
1906
1907 return ret;
1908}
1909
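/*
 * _map_to_one_page - Map a virtual range repeatedly to one physical page
 * @pt - Pointer to kgsl pagetable structure
 * @addr - GPU address to start mapping at
 * @memdesc - memdesc that provides the backing page
 * @physoffset - offset into the memdesc of the backing page
 * @size - size of the virtual range to map
 * @map_flags - IOMMU protection flags for the mapping
 *
 * Return 0 on success, error on map fail
 */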
1910static int _map_to_one_page(struct kgsl_pagetable *pt, uint64_t addr,
1911 struct kgsl_memdesc *memdesc, uint64_t physoffset,
1912 uint64_t size, unsigned int map_flags)
1913{
1914 int ret = 0, i;
1915 int pg_sz = kgsl_memdesc_get_pagesize(memdesc);
1916 int count = size >> PAGE_SHIFT;
1917 struct page *page = NULL;
1918 struct page **pages = NULL;
1919 struct sg_page_iter sg_iter;
1920 struct sg_table sgt;
1921
1922	/* Find the page that backs physoffset within the memdesc */
1923 if (memdesc->pages != NULL)
1924 page = memdesc->pages[physoffset >> PAGE_SHIFT];
1925 else {
1926 for_each_sg_page(memdesc->sgt->sgl, &sg_iter,
1927 memdesc->sgt->nents, physoffset >> PAGE_SHIFT) {
1928 page = sg_page_iter_page(&sg_iter);
1929 break;
1930 }
1931 }
1932
1933 if (page == NULL)
1934 return -EINVAL;
1935
1936 pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
1937 if (pages == NULL)
1938 return -ENOMEM;
1939
1940 for (i = 0; i < count; i++) {
1941 if (pg_sz != PAGE_SIZE) {
1942 struct page *tmp_page = page;
1943 int j;
1944
1945 for (j = 0; j < 16; j++, tmp_page += PAGE_SIZE)
1946 pages[i++] = tmp_page;
1947 } else
1948 pages[i] = page;
1949 }
1950
1951 ret = sg_alloc_table_from_pages(&sgt, pages, count,
1952 0, size, GFP_KERNEL);
1953 if (ret == 0) {
Carter Coopera1c7cce2017-12-15 13:29:29 -07001954 ret = _iommu_map_sg_sync_pc(pt, addr, sgt.sgl,
Shrenuj Bansala419c792016-10-20 14:05:11 -07001955 sgt.nents, map_flags);
1956 sg_free_table(&sgt);
1957 }
1958
1959 kfree(pages);
1960
1961 return ret;
1962}
1963
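/*
 * kgsl_iommu_map_offset - Map part of a memdesc at an offset into a mapping
 * @pt - Pointer to kgsl pagetable structure
 * @virtaddr - base GPU address of the mapping
 * @virtoffset - offset from virtaddr to start mapping at
 * @memdesc - memdesc that provides the physical pages
 * @physoffset - offset into the memdesc to start mapping from
 * @size - size of the region to map
 * @feature_flag - sparse bind flags (KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS)
 *
 * Return 0 on success, error on map fail
 */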
1964static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
1965 uint64_t virtaddr, uint64_t virtoffset,
1966 struct kgsl_memdesc *memdesc, uint64_t physoffset,
1967 uint64_t size, uint64_t feature_flag)
1968{
1969 int pg_sz;
1970 unsigned int protflags = _get_protection_flags(memdesc);
1971 int ret;
1972 struct sg_table *sgt = NULL;
1973
1974 pg_sz = kgsl_memdesc_get_pagesize(memdesc);
1975 if (!IS_ALIGNED(virtaddr | virtoffset | physoffset | size, pg_sz))
1976 return -EINVAL;
1977
1978 if (size == 0)
1979 return -EINVAL;
1980
1981 if (!(feature_flag & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS) &&
1982 size + physoffset > kgsl_memdesc_footprint(memdesc))
1983 return -EINVAL;
1984
1985 /*
1986 * For paged memory allocated through kgsl, memdesc->pages is not NULL.
1987 * Allocate sgt here just for its map operation. Contiguous memory
1988 * already has its sgt, so no need to allocate it here.
1989 */
1990 if (memdesc->pages != NULL)
1991 sgt = kgsl_alloc_sgt_from_pages(memdesc);
1992 else
1993 sgt = memdesc->sgt;
1994
1995 if (IS_ERR(sgt))
1996 return PTR_ERR(sgt);
1997
1998 if (feature_flag & KGSL_SPARSE_BIND_MULTIPLE_TO_PHYS)
1999 ret = _map_to_one_page(pt, virtaddr + virtoffset,
2000 memdesc, physoffset, size, protflags);
2001 else
2002 ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
Carter Coopera1c7cce2017-12-15 13:29:29 -07002003 sgt->sgl, sgt->nents,
Shrenuj Bansala419c792016-10-20 14:05:11 -07002004 physoffset, size, protflags);
2005
2006 if (memdesc->pages != NULL)
2007 kgsl_free_sgt(sgt);
2008
2009 return ret;
2010}
2011
2012/* This function must be called with context bank attached */
2013static void kgsl_iommu_clear_fsr(struct kgsl_mmu *mmu)
2014{
2015 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2016 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2017 unsigned int sctlr_val;
2018
2019 if (ctx->default_pt != NULL) {
2020 kgsl_iommu_enable_clk(mmu);
2021 KGSL_IOMMU_SET_CTX_REG(ctx, FSR, 0xffffffff);
2022 /*
2023 * Re-enable context fault interrupts after clearing
2024 * FSR to prevent the interrupt from firing repeatedly
2025 */
2026 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
2027 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
2028 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
2029 /*
2030 * Make sure the above register writes
2031 * are not reordered across the barrier
2032 * as we use writel_relaxed to write them
2033 */
2034 wmb();
2035 kgsl_iommu_disable_clk(mmu);
2036 }
2037}
2038
2039static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
2040{
2041 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2042 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2043
2044 if (ctx->default_pt != NULL && ctx->fault) {
2045 /*
2046 * Write 1 to RESUME.TnR to terminate the
2047 * stalled transaction.
2048 */
2049 KGSL_IOMMU_SET_CTX_REG(ctx, RESUME, 1);
2050 /*
2051 * Make sure the above register writes
2052 * are not reordered across the barrier
2053 * as we use writel_relaxed to write them
2054 */
2055 wmb();
2056 ctx->fault = 0;
2057 }
2058}
2059
2060static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
2061{
2062 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2063 int i;
2064
2065 /*
2066 * If the iommu supports retention, we don't need
2067 * to detach when stopping.
2068 */
2069 if (!MMU_FEATURE(mmu, KGSL_MMU_RETENTION)) {
2070 for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
2071 _detach_context(&iommu->ctx[i]);
2072 }
2073}
2074
2075static u64
2076kgsl_iommu_get_current_ttbr0(struct kgsl_mmu *mmu)
2077{
2078 u64 val;
2079 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
Harshdeep Dhatt1e55e212018-10-12 20:32:17 -06002080 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2081
Shrenuj Bansala419c792016-10-20 14:05:11 -07002082 /*
2083	 * We cannot enable or disable the clocks in interrupt context; this
2084	 * function is called from interrupt context if there is an AXI error
2085 */
2086 if (in_interrupt())
2087 return 0;
2088
Harshdeep Dhatt1e55e212018-10-12 20:32:17 -06002089 if (ctx->regbase == NULL)
2090 return 0;
2091
Shrenuj Bansala419c792016-10-20 14:05:11 -07002092 kgsl_iommu_enable_clk(mmu);
Harshdeep Dhatt1e55e212018-10-12 20:32:17 -06002093 val = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002094 kgsl_iommu_disable_clk(mmu);
2095 return val;
2096}
2097
2098/*
2099 * kgsl_iommu_set_pt - Change the IOMMU pagetable of the primary context bank
2100 * @mmu - Pointer to mmu structure
2101 * @pt - Pagetable to switch to
2102 *
2103 * Set the new pagetable for the IOMMU by doing direct register writes
2104 * to the IOMMU registers through the CPU
2105 *
2106 * Return - 0 on success
2107 */
2108static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
2109{
2110 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2111 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2112 uint64_t ttbr0, temp;
2113 unsigned int contextidr;
2114 unsigned long wait_for_flush;
2115
2116 if ((pt != mmu->defaultpagetable) && !kgsl_mmu_is_perprocess(mmu))
2117 return 0;
2118
2119 kgsl_iommu_enable_clk(mmu);
2120
2121 ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pt);
2122 contextidr = kgsl_mmu_pagetable_get_contextidr(pt);
2123
2124 KGSL_IOMMU_SET_CTX_REG_Q(ctx, TTBR0, ttbr0);
2125 KGSL_IOMMU_SET_CTX_REG(ctx, CONTEXTIDR, contextidr);
2126
2127 /* memory barrier before reading TTBR0 register */
2128 mb();
2129 temp = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
2130
2131 KGSL_IOMMU_SET_CTX_REG(ctx, TLBIALL, 1);
2132 /* make sure the TBLI write completes before we wait */
2133	/* make sure the TLBIALL write completes before we wait */
2134 /*
2135	 * Wait for the flush to complete by polling the flush
2136	 * status bit of the TLBSTATUS register for no more than
2137	 * 2 seconds. After 2 seconds just exit; at that point the
2138	 * SMMU h/w may be stuck and will eventually cause the GPU
2139	 * to hang or bring the system down.
2140 */
2141 wait_for_flush = jiffies + msecs_to_jiffies(2000);
2142 KGSL_IOMMU_SET_CTX_REG(ctx, TLBSYNC, 0);
2143 while (KGSL_IOMMU_GET_CTX_REG(ctx, TLBSTATUS) &
2144 (KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE)) {
2145 if (time_after(jiffies, wait_for_flush)) {
2146 KGSL_DRV_WARN(KGSL_MMU_DEVICE(mmu),
2147 "Wait limit reached for IOMMU tlb flush\n");
2148 break;
2149 }
2150 cpu_relax();
2151 }
2152
2153 kgsl_iommu_disable_clk(mmu);
2154 return 0;
2155}
2156
2157/*
2158 * kgsl_iommu_set_pf_policy() - Set the pagefault policy for IOMMU
2159 * @mmu: Pointer to mmu structure
2160 * @pf_policy: The pagefault policy to set
2161 *
2162 * Check if the new policy indicated by pf_policy is the same as the
2163 * current policy; if so return, otherwise apply the new policy
2164 */
2165static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
2166 unsigned long pf_policy)
2167{
2168 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2169 struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
2170 struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
2171 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
2172
2173 if ((adreno_dev->ft_pf_policy &
2174 BIT(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)) ==
2175 (pf_policy & BIT(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)))
2176 return 0;
2177
2178 /* If not attached, policy will be updated during the next attach */
2179 if (ctx->default_pt != NULL) {
2180 unsigned int sctlr_val;
2181
2182 kgsl_iommu_enable_clk(mmu);
2183
2184 sctlr_val = KGSL_IOMMU_GET_CTX_REG(ctx, SCTLR);
2185
2186 if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, &pf_policy)) {
2187 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
2188 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
2189 } else {
2190 sctlr_val &= ~(0x1 << KGSL_IOMMU_SCTLR_CFCFG_SHIFT);
2191 sctlr_val |= (0x1 << KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
2192 }
2193
2194 KGSL_IOMMU_SET_CTX_REG(ctx, SCTLR, sctlr_val);
2195
2196 kgsl_iommu_disable_clk(mmu);
2197 }
2198
2199 return 0;
2200}
2201
2202static struct kgsl_protected_registers *
2203kgsl_iommu_get_prot_regs(struct kgsl_mmu *mmu)
2204{
2205 struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
2206
2207 return &iommu->protect;
2208}
2209
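/*
 * _find_gpuaddr - Look up the address entry whose base matches gpuaddr
 * @pagetable - Pointer to kgsl pagetable structure
 * @gpuaddr - GPU address to look up
 *
 * Return the matching kgsl_iommu_addr_entry or NULL if none exists
 */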
2210static struct kgsl_iommu_addr_entry *_find_gpuaddr(
2211 struct kgsl_pagetable *pagetable, uint64_t gpuaddr)
2212{
2213 struct kgsl_iommu_pt *pt = pagetable->priv;
2214 struct rb_node *node = pt->rbtree.rb_node;
2215
2216 while (node != NULL) {
2217 struct kgsl_iommu_addr_entry *entry = rb_entry(node,
2218 struct kgsl_iommu_addr_entry, node);
2219
2220 if (gpuaddr < entry->base)
2221 node = node->rb_left;
2222 else if (gpuaddr > entry->base)
2223 node = node->rb_right;
2224 else
2225 return entry;
2226 }
2227
2228 return NULL;
2229}
2230
2231static int _remove_gpuaddr(struct kgsl_pagetable *pagetable,
2232 uint64_t gpuaddr)
2233{
2234 struct kgsl_iommu_pt *pt = pagetable->priv;
2235 struct kgsl_iommu_addr_entry *entry;
2236
2237 entry = _find_gpuaddr(pagetable, gpuaddr);
2238
2239 if (entry != NULL) {
2240 rb_erase(&entry->node, &pt->rbtree);
2241 kmem_cache_free(addr_entry_cache, entry);
2242 return 0;
2243 }
2244
2245 WARN(1, "Couldn't remove gpuaddr: 0x%llx\n", gpuaddr);
2246 return -ENOMEM;
2247}
2248
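/*
 * _insert_gpuaddr - Insert a new address entry into the pagetable rbtree
 * @pagetable - Pointer to kgsl pagetable structure
 * @gpuaddr - base GPU address of the new entry
 * @size - size of the new entry
 *
 * Return 0 on success, -EEXIST for a duplicate address or -ENOMEM if the
 * entry could not be allocated
 */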
2249static int _insert_gpuaddr(struct kgsl_pagetable *pagetable,
2250 uint64_t gpuaddr, uint64_t size)
2251{
2252 struct kgsl_iommu_pt *pt = pagetable->priv;
2253 struct rb_node **node, *parent = NULL;
2254 struct kgsl_iommu_addr_entry *new =
2255 kmem_cache_alloc(addr_entry_cache, GFP_ATOMIC);
2256
2257 if (new == NULL)
2258 return -ENOMEM;
2259
2260 new->base = gpuaddr;
2261 new->size = size;
2262
2263 node = &pt->rbtree.rb_node;
2264
2265 while (*node != NULL) {
2266 struct kgsl_iommu_addr_entry *this;
2267
2268 parent = *node;
2269 this = rb_entry(parent, struct kgsl_iommu_addr_entry, node);
2270
2271 if (new->base < this->base)
2272 node = &parent->rb_left;
2273 else if (new->base > this->base)
2274 node = &parent->rb_right;
2275 else {
2276 /* Duplicate entry */
2277 WARN(1, "duplicate gpuaddr: 0x%llx\n", gpuaddr);
2278 return -EEXIST;
2279 }
2280 }
2281
2282 rb_link_node(&new->node, parent, node);
2283 rb_insert_color(&new->node, &pt->rbtree);
2284
2285 return 0;
2286}
2287
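/*
 * _get_unmapped_area - Bottom-up search for a free region
 * @pagetable - Pointer to kgsl pagetable structure
 * @bottom - lowest acceptable GPU address
 * @top - highest acceptable GPU address
 * @size - size of the region to find
 * @align - required alignment of the region
 *
 * Walk the sorted rbtree of allocations and return the first aligned gap
 * between bottom and top that can hold the requested size.
 * Return the start of the gap on success, -ENOMEM if none was found
 */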
2288static uint64_t _get_unmapped_area(struct kgsl_pagetable *pagetable,
2289 uint64_t bottom, uint64_t top, uint64_t size,
2290 uint64_t align)
2291{
2292 struct kgsl_iommu_pt *pt = pagetable->priv;
2293 struct rb_node *node = rb_first(&pt->rbtree);
2294 uint64_t start;
2295
2296 bottom = ALIGN(bottom, align);
2297 start = bottom;
2298
2299 while (node != NULL) {
2300 uint64_t gap;
2301 struct kgsl_iommu_addr_entry *entry = rb_entry(node,
2302 struct kgsl_iommu_addr_entry, node);
2303
2304 /*
2305 * Skip any entries that are outside of the range, but make sure
2306 * to account for some that might straddle the lower bound
2307 */
2308 if (entry->base < bottom) {
2309 if (entry->base + entry->size > bottom)
2310 start = ALIGN(entry->base + entry->size, align);
2311 node = rb_next(node);
2312 continue;
2313 }
2314
2315 /* Stop if we went over the top */
2316 if (entry->base >= top)
2317 break;
2318
2319 /* Make sure there is a gap to consider */
2320 if (start < entry->base) {
2321 gap = entry->base - start;
2322
2323 if (gap >= size)
2324 return start;
2325 }
2326
2327 /* Stop if there is no more room in the region */
2328 if (entry->base + entry->size >= top)
2329 return (uint64_t) -ENOMEM;
2330
2331 /* Start the next cycle at the end of the current entry */
2332 start = ALIGN(entry->base + entry->size, align);
2333 node = rb_next(node);
2334 }
2335
2336 if (start + size <= top)
2337 return start;
2338
2339 return (uint64_t) -ENOMEM;
2340}
2341
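/*
 * _get_unmapped_area_topdown - Top-down search for a free region
 * @pagetable - Pointer to kgsl pagetable structure
 * @bottom - lowest acceptable GPU address
 * @top - highest acceptable GPU address
 * @size - size of the region to find
 * @align - required alignment of the region
 *
 * Walk the rbtree from the highest entry downward and return the highest
 * aligned gap that can hold the requested size.
 * Return the start of the gap on success, -ENOMEM if none was found
 */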
2342static uint64_t _get_unmapped_area_topdown(struct kgsl_pagetable *pagetable,
2343 uint64_t bottom, uint64_t top, uint64_t size,
2344 uint64_t align)
2345{
2346 struct kgsl_iommu_pt *pt = pagetable->priv;
2347 struct rb_node *node = rb_last(&pt->rbtree);
2348 uint64_t end = top;
2349 uint64_t mask = ~(align - 1);
2350 struct kgsl_iommu_addr_entry *entry;
2351
2352 /* Make sure that the bottom is correctly aligned */
2353 bottom = ALIGN(bottom, align);
2354
2355 /* Make sure the requested size will fit in the range */
2356 if (size > (top - bottom))
2357 return -ENOMEM;
2358
2359 /* Walk back through the list to find the highest entry in the range */
2360 for (node = rb_last(&pt->rbtree); node != NULL; node = rb_prev(node)) {
2361 entry = rb_entry(node, struct kgsl_iommu_addr_entry, node);
2362 if (entry->base < top)
2363 break;
2364 }
2365
2366 while (node != NULL) {
2367 uint64_t offset;
2368
2369 entry = rb_entry(node, struct kgsl_iommu_addr_entry, node);
2370
2371 /* If the entire entry is below the range the search is over */
2372 if ((entry->base + entry->size) < bottom)
2373 break;
2374
2375 /* Get the top of the entry properly aligned */
2376 offset = ALIGN(entry->base + entry->size, align);
2377
2378 /*
2379 * Try to allocate the memory from the top of the gap,
2380 * making sure that it fits between the top of this entry and
2381 * the bottom of the previous one
2382 */
2383
2384 if ((end > size) && (offset < end)) {
2385 uint64_t chunk = (end - size) & mask;
2386
2387 if (chunk >= offset)
2388 return chunk;
2389 }
2390
2391 /*
2392 * If we get here and the current entry is outside of the range
2393 * then we are officially out of room
2394 */
2395
2396 if (entry->base < bottom)
2397 return (uint64_t) -ENOMEM;
2398
2399 /* Set the top of the gap to the current entry->base */
2400 end = entry->base;
2401
2402 /* And move on to the next lower entry */
2403 node = rb_prev(node);
2404 }
2405
2406 /* If we get here then there are no more entries in the region */
2407 if ((end > size) && (((end - size) & mask) >= bottom))
2408 return (end - size) & mask;
2409
2410 return (uint64_t) -ENOMEM;
2411}
2412
2413static uint64_t kgsl_iommu_find_svm_region(struct kgsl_pagetable *pagetable,
2414 uint64_t start, uint64_t end, uint64_t size,
2415 uint64_t alignment)
2416{
2417 uint64_t addr;
2418
2419 /* Avoid black holes */
2420 if (WARN(end <= start, "Bad search range: 0x%llx-0x%llx", start, end))
2421 return (uint64_t) -EINVAL;
2422
2423 spin_lock(&pagetable->lock);
2424 addr = _get_unmapped_area_topdown(pagetable,
2425 start, end, size, alignment);
2426 spin_unlock(&pagetable->lock);
2427 return addr;
2428}
2429
Rohan Sethida072642020-08-20 17:09:16 +05302430static bool iommu_addr_in_svm_ranges(struct kgsl_iommu_pt *pt,
2431 u64 gpuaddr, u64 size)
2432{
2433 if ((gpuaddr >= pt->compat_va_start && gpuaddr < pt->compat_va_end) &&
2434 ((gpuaddr + size) > pt->compat_va_start &&
2435 (gpuaddr + size) <= pt->compat_va_end))
2436 return true;
2437
2438 if ((gpuaddr >= pt->svm_start && gpuaddr < pt->svm_end) &&
2439 ((gpuaddr + size) > pt->svm_start &&
2440 (gpuaddr + size) <= pt->svm_end))
2441 return true;
2442
2443 return false;
2444}
2445
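/*
 * kgsl_iommu_set_svm_region - Reserve a caller-specified SVM region
 * @pagetable - Pointer to kgsl pagetable structure
 * @gpuaddr - requested GPU address
 * @size - size of the region
 *
 * Verify that the region lies entirely inside one of the SVM ranges and does
 * not overlap an existing allocation before inserting it into the rbtree.
 * Return 0 on success, -ENOMEM otherwise
 */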
Shrenuj Bansala419c792016-10-20 14:05:11 -07002446static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
2447 uint64_t gpuaddr, uint64_t size)
2448{
2449 int ret = -ENOMEM;
2450 struct kgsl_iommu_pt *pt = pagetable->priv;
2451 struct rb_node *node;
2452
Rohan Sethida072642020-08-20 17:09:16 +05302453 /* Make sure the requested address doesn't fall out of SVM range */
2454 if (!iommu_addr_in_svm_ranges(pt, gpuaddr, size))
Shrenuj Bansala419c792016-10-20 14:05:11 -07002455 return -ENOMEM;
2456
2457 spin_lock(&pagetable->lock);
2458 node = pt->rbtree.rb_node;
2459
2460 while (node != NULL) {
2461 uint64_t start, end;
2462 struct kgsl_iommu_addr_entry *entry = rb_entry(node,
2463 struct kgsl_iommu_addr_entry, node);
2464
2465 start = entry->base;
2466 end = entry->base + entry->size;
2467
2468 if (gpuaddr + size <= start)
2469 node = node->rb_left;
2470 else if (end <= gpuaddr)
2471 node = node->rb_right;
2472 else
2473 goto out;
2474 }
2475
2476 ret = _insert_gpuaddr(pagetable, gpuaddr, size);
2477out:
2478 spin_unlock(&pagetable->lock);
2479 return ret;
2480}
2481
2482
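/*
 * kgsl_iommu_get_gpuaddr - Assign a GPU virtual address to a memdesc
 * @pagetable - Pointer to kgsl pagetable structure
 * @memdesc - memdesc that needs a GPU address
 *
 * Pick the compat or full VA range based on the memdesc flags, find a free
 * aligned region within it and record the assignment in the rbtree.
 * Return 0 on success, error otherwise
 */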
2483static int kgsl_iommu_get_gpuaddr(struct kgsl_pagetable *pagetable,
2484 struct kgsl_memdesc *memdesc)
2485{
2486 struct kgsl_iommu_pt *pt = pagetable->priv;
2487 int ret = 0;
2488 uint64_t addr, start, end, size;
2489 unsigned int align;
2490
2491 if (WARN_ON(kgsl_memdesc_use_cpu_map(memdesc)))
2492 return -EINVAL;
2493
2494 if (memdesc->flags & KGSL_MEMFLAGS_SECURE &&
2495 pagetable->name != KGSL_MMU_SECURE_PT)
2496 return -EINVAL;
2497
2498 size = kgsl_memdesc_footprint(memdesc);
2499
2500 align = 1 << kgsl_memdesc_get_align(memdesc);
2501
2502 if (memdesc->flags & KGSL_MEMFLAGS_FORCE_32BIT) {
2503 start = pt->compat_va_start;
2504 end = pt->compat_va_end;
2505 } else {
2506 start = pt->va_start;
2507 end = pt->va_end;
2508 }
2509
Harshdeep Dhatt1f408332017-03-27 11:35:13 -06002510 /*
2511 * When mapping secure buffers, adjust the start of the va range
2512 * to the end of secure global buffers.
2513 */
2514 if (kgsl_memdesc_is_secured(memdesc))
2515 start += secure_global_size;
2516
Shrenuj Bansala419c792016-10-20 14:05:11 -07002517 spin_lock(&pagetable->lock);
2518
2519 addr = _get_unmapped_area(pagetable, start, end, size, align);
2520
2521 if (addr == (uint64_t) -ENOMEM) {
2522 ret = -ENOMEM;
2523 goto out;
2524 }
2525
2526 ret = _insert_gpuaddr(pagetable, addr, size);
2527 if (ret == 0) {
2528 memdesc->gpuaddr = addr;
2529 memdesc->pagetable = pagetable;
2530 }
2531
2532out:
2533 spin_unlock(&pagetable->lock);
2534 return ret;
2535}
2536
2537static void kgsl_iommu_put_gpuaddr(struct kgsl_memdesc *memdesc)
2538{
2539 if (memdesc->pagetable == NULL)
2540 return;
2541
2542 spin_lock(&memdesc->pagetable->lock);
2543
2544 _remove_gpuaddr(memdesc->pagetable, memdesc->gpuaddr);
2545
2546 spin_unlock(&memdesc->pagetable->lock);
2547}
2548
2549static int kgsl_iommu_svm_range(struct kgsl_pagetable *pagetable,
2550 uint64_t *lo, uint64_t *hi, uint64_t memflags)
2551{
2552 struct kgsl_iommu_pt *pt = pagetable->priv;
2553 bool gpu_compat = (memflags & KGSL_MEMFLAGS_FORCE_32BIT) != 0;
2554
2555 if (lo != NULL)
2556 *lo = gpu_compat ? pt->compat_va_start : pt->svm_start;
2557 if (hi != NULL)
2558 *hi = gpu_compat ? pt->compat_va_end : pt->svm_end;
2559
2560 return 0;
2561}
2562
2563static bool kgsl_iommu_addr_in_range(struct kgsl_pagetable *pagetable,
2564 uint64_t gpuaddr)
2565{
2566 struct kgsl_iommu_pt *pt = pagetable->priv;
2567
2568 if (gpuaddr == 0)
2569 return false;
2570
2571 if (gpuaddr >= pt->va_start && gpuaddr < pt->va_end)
2572 return true;
2573
2574 if (gpuaddr >= pt->compat_va_start && gpuaddr < pt->compat_va_end)
2575 return true;
2576
2577 if (gpuaddr >= pt->svm_start && gpuaddr < pt->svm_end)
2578 return true;
2579
2580 return false;
2581}
2582
2583static const struct {
2584 int id;
2585 char *name;
2586} kgsl_iommu_cbs[] = {
2587 { KGSL_IOMMU_CONTEXT_USER, "gfx3d_user", },
2588 { KGSL_IOMMU_CONTEXT_SECURE, "gfx3d_secure" },
Rajesh Kemisetti63d93582018-01-31 10:52:56 +05302589 { KGSL_IOMMU_CONTEXT_SECURE, "gfx3d_secure_alt" },
Shrenuj Bansala419c792016-10-20 14:05:11 -07002590};
2591
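/*
 * _kgsl_iommu_cb_probe - Probe a single context bank device tree node
 * @device - Pointer to the kgsl device
 * @iommu - Pointer to the kgsl iommu structure
 * @node - device tree node of the context bank
 *
 * Match the node name against the known context banks, honoring the
 * secure-alt quirk, and fill in the corresponding context structure.
 * Unknown names are skipped.
 * Return 0 on success, error otherwise
 */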
2592static int _kgsl_iommu_cb_probe(struct kgsl_device *device,
2593 struct kgsl_iommu *iommu, struct device_node *node)
2594{
2595 struct platform_device *pdev = of_find_device_by_node(node);
2596 struct kgsl_iommu_context *ctx = NULL;
Rajesh Kemisetti63d93582018-01-31 10:52:56 +05302597 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002598 int i;
2599
2600 for (i = 0; i < ARRAY_SIZE(kgsl_iommu_cbs); i++) {
2601 if (!strcmp(node->name, kgsl_iommu_cbs[i].name)) {
2602 int id = kgsl_iommu_cbs[i].id;
2603
Rajesh Kemisetti63d93582018-01-31 10:52:56 +05302604 if (ADRENO_QUIRK(adreno_dev,
2605 ADRENO_QUIRK_MMU_SECURE_CB_ALT)) {
2606 if (!strcmp(node->name, "gfx3d_secure"))
2607 continue;
2608 } else if (!strcmp(node->name, "gfx3d_secure_alt"))
2609 continue;
2610
Shrenuj Bansala419c792016-10-20 14:05:11 -07002611 ctx = &iommu->ctx[id];
2612 ctx->id = id;
2613 ctx->cb_num = -1;
2614 ctx->name = kgsl_iommu_cbs[i].name;
2615
2616 break;
2617 }
2618 }
2619
2620 if (ctx == NULL) {
Rajesh Kemisetti63d93582018-01-31 10:52:56 +05302621 KGSL_CORE_ERR("dt: Unused context label %s\n", node->name);
2622 return 0;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002623 }
2624
2625 if (ctx->id == KGSL_IOMMU_CONTEXT_SECURE)
2626 device->mmu.secured = true;
2627
2628 /* this property won't be found for all context banks */
2629 if (of_property_read_u32(node, "qcom,gpu-offset", &ctx->gpu_offset))
2630 ctx->gpu_offset = UINT_MAX;
2631
2632 ctx->kgsldev = device;
2633
2634	/* With the arm-smmu driver we'll have the right device pointer here. */
2635 if (of_find_property(node, "iommus", NULL)) {
2636 ctx->dev = &pdev->dev;
2637 } else {
2638 ctx->dev = kgsl_mmu_get_ctx(ctx->name);
2639
2640 if (IS_ERR(ctx->dev))
2641 return PTR_ERR(ctx->dev);
2642 }
2643
2644 return 0;
2645}
2646
2647static const struct {
2648 char *feature;
Lynus Vazeb7af682017-04-17 18:36:01 +05302649 unsigned long bit;
Shrenuj Bansala419c792016-10-20 14:05:11 -07002650} kgsl_iommu_features[] = {
2651 { "qcom,retention", KGSL_MMU_RETENTION },
2652 { "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
2653 { "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC },
2654 { "qcom,force-32bit", KGSL_MMU_FORCE_32BIT },
2655};
2656
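/*
 * _kgsl_iommu_probe - Parse the SMMU device tree node
 * @device - Pointer to the kgsl device
 * @node - device tree node of the SMMU
 *
 * Read the register and protection ranges, clocks and feature properties,
 * then probe each child context bank node.
 * Return 0 on success, error otherwise
 */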
2657static int _kgsl_iommu_probe(struct kgsl_device *device,
2658 struct device_node *node)
2659{
2660 const char *cname;
2661 struct property *prop;
2662 u32 reg_val[2];
2663 int i = 0;
2664 struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
2665 struct device_node *child;
2666 struct platform_device *pdev = of_find_device_by_node(node);
2667
2668 memset(iommu, 0, sizeof(*iommu));
2669
2670 if (of_device_is_compatible(node, "qcom,kgsl-smmu-v1"))
2671 iommu->version = 1;
2672 else
2673 iommu->version = 2;
2674
2675 if (of_property_read_u32_array(node, "reg", reg_val, 2)) {
2676 KGSL_CORE_ERR("dt: Unable to read KGSL IOMMU register range\n");
2677 return -EINVAL;
2678 }
2679 iommu->regstart = reg_val[0];
2680 iommu->regsize = reg_val[1];
2681
2682 /* Protecting the SMMU registers is mandatory */
2683 if (of_property_read_u32_array(node, "qcom,protect", reg_val, 2)) {
2684 KGSL_CORE_ERR("dt: no iommu protection range specified\n");
2685 return -EINVAL;
2686 }
2687 iommu->protect.base = reg_val[0] / sizeof(u32);
Lynus Vaz607a42d2018-05-23 20:26:51 +05302688 iommu->protect.range = reg_val[1] / sizeof(u32);
Shrenuj Bansala419c792016-10-20 14:05:11 -07002689
2690 of_property_for_each_string(node, "clock-names", prop, cname) {
2691 struct clk *c = devm_clk_get(&pdev->dev, cname);
2692
2693 if (IS_ERR(c)) {
2694 KGSL_CORE_ERR("dt: Couldn't get clock: %s\n", cname);
2695 return -ENODEV;
2696 }
2697 if (i >= KGSL_IOMMU_MAX_CLKS) {
2698 KGSL_CORE_ERR("dt: too many clocks defined.\n");
2699 return -EINVAL;
2700 }
2701
2702 iommu->clks[i] = c;
2703 ++i;
2704 }
2705
2706 for (i = 0; i < ARRAY_SIZE(kgsl_iommu_features); i++) {
2707 if (of_property_read_bool(node, kgsl_iommu_features[i].feature))
2708 device->mmu.features |= kgsl_iommu_features[i].bit;
2709 }
2710
2711 if (of_property_read_u32(node, "qcom,micro-mmu-control",
2712 &iommu->micro_mmu_ctrl))
2713 iommu->micro_mmu_ctrl = UINT_MAX;
2714
2715 if (of_property_read_u32(node, "qcom,secure_align_mask",
2716 &device->mmu.secure_align_mask))
2717 device->mmu.secure_align_mask = 0xfff;
2718
2719 /* Fill out the rest of the devices in the node */
2720 of_platform_populate(node, NULL, NULL, &pdev->dev);
2721
2722 for_each_child_of_node(node, child) {
2723 int ret;
2724
2725 if (!of_device_is_compatible(child, "qcom,smmu-kgsl-cb"))
2726 continue;
2727
2728 ret = _kgsl_iommu_cb_probe(device, iommu, child);
2729 if (ret)
2730 return ret;
2731 }
2732
2733 return 0;
2734}
2735
2736static const struct {
2737 char *compat;
2738 int (*probe)(struct kgsl_device *device, struct device_node *node);
2739} kgsl_dt_devices[] = {
2740 { "qcom,kgsl-smmu-v1", _kgsl_iommu_probe },
2741 { "qcom,kgsl-smmu-v2", _kgsl_iommu_probe },
2742};
2743
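/*
 * kgsl_iommu_probe - Find a compatible SMMU node in the device tree
 * @device - Pointer to the kgsl device
 *
 * Return the result of the matching probe function, or -ENODEV if no
 * compatible node was found
 */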
2744static int kgsl_iommu_probe(struct kgsl_device *device)
2745{
2746 int i;
2747
2748 for (i = 0; i < ARRAY_SIZE(kgsl_dt_devices); i++) {
2749 struct device_node *node;
2750
2751 node = of_find_compatible_node(device->pdev->dev.of_node,
2752 NULL, kgsl_dt_devices[i].compat);
2753
2754 if (node != NULL)
2755 return kgsl_dt_devices[i].probe(device, node);
2756 }
2757
2758 return -ENODEV;
2759}
2760
2761struct kgsl_mmu_ops kgsl_iommu_ops = {
2762 .mmu_init = kgsl_iommu_init,
2763 .mmu_close = kgsl_iommu_close,
2764 .mmu_start = kgsl_iommu_start,
2765 .mmu_stop = kgsl_iommu_stop,
2766 .mmu_set_pt = kgsl_iommu_set_pt,
2767 .mmu_clear_fsr = kgsl_iommu_clear_fsr,
2768 .mmu_get_current_ttbr0 = kgsl_iommu_get_current_ttbr0,
2769 .mmu_enable_clk = kgsl_iommu_enable_clk,
2770 .mmu_disable_clk = kgsl_iommu_disable_clk,
2771 .mmu_get_reg_ahbaddr = kgsl_iommu_get_reg_ahbaddr,
2772 .mmu_pt_equal = kgsl_iommu_pt_equal,
2773 .mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
2774 .mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
2775 .mmu_get_prot_regs = kgsl_iommu_get_prot_regs,
2776 .mmu_init_pt = kgsl_iommu_init_pt,
2777 .mmu_add_global = kgsl_iommu_add_global,
2778 .mmu_remove_global = kgsl_iommu_remove_global,
2779 .mmu_getpagetable = kgsl_iommu_getpagetable,
2780 .mmu_get_qdss_global_entry = kgsl_iommu_get_qdss_global_entry,
Jonathan Wicks4892d8d2017-02-24 16:21:26 -07002781 .mmu_get_qtimer_global_entry = kgsl_iommu_get_qtimer_global_entry,
Shrenuj Bansala419c792016-10-20 14:05:11 -07002782 .probe = kgsl_iommu_probe,
2783};
2784
2785static struct kgsl_mmu_pt_ops iommu_pt_ops = {
2786 .mmu_map = kgsl_iommu_map,
2787 .mmu_unmap = kgsl_iommu_unmap,
2788 .mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
2789 .get_ttbr0 = kgsl_iommu_get_ttbr0,
2790 .get_contextidr = kgsl_iommu_get_contextidr,
2791 .get_gpuaddr = kgsl_iommu_get_gpuaddr,
2792 .put_gpuaddr = kgsl_iommu_put_gpuaddr,
2793 .set_svm_region = kgsl_iommu_set_svm_region,
2794 .find_svm_region = kgsl_iommu_find_svm_region,
2795 .svm_range = kgsl_iommu_svm_range,
2796 .addr_in_range = kgsl_iommu_addr_in_range,
2797 .mmu_map_offset = kgsl_iommu_map_offset,
2798 .mmu_unmap_offset = kgsl_iommu_unmap_offset,
2799 .mmu_sparse_dummy_map = kgsl_iommu_sparse_dummy_map,
2800};