/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H

#include <linux/dma-mapping.h>

#include "kgsl_mmu.h"

struct kgsl_device;
struct kgsl_process_private;

#define KGSL_CACHE_OP_INV 0x01
#define KGSL_CACHE_OP_FLUSH 0x02
#define KGSL_CACHE_OP_CLEAN 0x03

int kgsl_sharedmem_alloc_contig(struct kgsl_device *device,
                        struct kgsl_memdesc *memdesc,
                        uint64_t size);

void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);

int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
                        uint32_t *dst,
                        uint64_t offsetbytes);

int kgsl_sharedmem_writel(struct kgsl_device *device,
                        const struct kgsl_memdesc *memdesc,
                        uint64_t offsetbytes,
                        uint32_t src);

int kgsl_sharedmem_readq(const struct kgsl_memdesc *memdesc,
                        uint64_t *dst,
                        uint64_t offsetbytes);

int kgsl_sharedmem_writeq(struct kgsl_device *device,
                        const struct kgsl_memdesc *memdesc,
                        uint64_t offsetbytes,
                        uint64_t src);

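/*
 * Usage sketch (illustrative only, not part of this API): write a dword
 * into an allocated buffer and read it back. Assumes `device` and
 * `memdesc` are valid and the buffer is at least 4 bytes; both calls
 * return 0 on success.
 *
 *      uint32_t val;
 *
 *      kgsl_sharedmem_writel(device, memdesc, 0, 0xdeadbeef);
 *      kgsl_sharedmem_readl(memdesc, &val, 0);
 */
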
int kgsl_sharedmem_set(struct kgsl_device *device,
                        const struct kgsl_memdesc *memdesc,
                        uint64_t offsetbytes, unsigned int value,
                        uint64_t sizebytes);

int kgsl_cache_range_op(struct kgsl_memdesc *memdesc,
                        uint64_t offset, uint64_t size,
                        unsigned int op);

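/*
 * Usage sketch (illustrative only): flush CPU writes back to memory
 * before the GPU reads the buffer. Assumes `memdesc` has a cached CPU
 * mapping; KGSL_CACHE_OP_INV or KGSL_CACHE_OP_CLEAN are used the same
 * way.
 *
 *      int ret = kgsl_cache_range_op(memdesc, 0, memdesc->size,
 *                      KGSL_CACHE_OP_FLUSH);
 */
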
void kgsl_process_init_sysfs(struct kgsl_device *device,
                struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);

int kgsl_sharedmem_init_sysfs(void);
void kgsl_sharedmem_uninit_sysfs(void);

int kgsl_allocate_user(struct kgsl_device *device,
                struct kgsl_memdesc *memdesc,
                uint64_t size, uint64_t flags);

void kgsl_get_memory_usage(char *str, size_t len, uint64_t memflags);

int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
                uint64_t size);

#define MEMFLAGS(_flags, _mask, _shift) \
        ((unsigned int) (((_flags) & (_mask)) >> (_shift)))

/*
 * kgsl_memdesc_get_align - Get alignment flags from a memdesc
 * @memdesc - the memdesc
 *
 * Returns the requested alignment, as a power-of-2 exponent.
 */
static inline int
kgsl_memdesc_get_align(const struct kgsl_memdesc *memdesc)
{
        return MEMFLAGS(memdesc->flags, KGSL_MEMALIGN_MASK,
                KGSL_MEMALIGN_SHIFT);
}

/*
 * kgsl_memdesc_get_pagesize - Get pagesize based on alignment
 * @memdesc - the memdesc
 *
 * Returns the pagesize based on the memdesc alignment
 */
static inline int
kgsl_memdesc_get_pagesize(const struct kgsl_memdesc *memdesc)
{
        return (1 << kgsl_memdesc_get_align(memdesc));
}

/*
 * kgsl_memdesc_get_cachemode - Get cache mode of a memdesc
 * @memdesc: the memdesc
 *
 * Returns a KGSL_CACHEMODE* value.
 */
static inline int
kgsl_memdesc_get_cachemode(const struct kgsl_memdesc *memdesc)
{
        return MEMFLAGS(memdesc->flags, KGSL_CACHEMODE_MASK,
                KGSL_CACHEMODE_SHIFT);
}

/*
 * kgsl_memdesc_get_memtype - Get memory type of a memdesc
 * @memdesc: the memdesc
 *
 * Returns a KGSL_MEMTYPE_* value.
 */
static inline unsigned int
kgsl_memdesc_get_memtype(const struct kgsl_memdesc *memdesc)
{
        return MEMFLAGS(memdesc->flags, KGSL_MEMTYPE_MASK,
                KGSL_MEMTYPE_SHIFT);
}

/*
 * kgsl_memdesc_set_align - Set alignment flags of a memdesc
 * @memdesc - the memdesc
 * @align - requested alignment, as a power-of-2 exponent
 *
 * Clamps the exponent to 32 and returns 0.
 */
static inline int
kgsl_memdesc_set_align(struct kgsl_memdesc *memdesc, unsigned int align)
{
        if (align > 32)
                align = 32;

        memdesc->flags &= ~(uint64_t)KGSL_MEMALIGN_MASK;
        memdesc->flags |= (uint64_t)((align << KGSL_MEMALIGN_SHIFT) &
                KGSL_MEMALIGN_MASK);
        return 0;
}
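
/*
 * Worked example (illustrative only): the alignment travels as a
 * power-of-2 exponent, not a byte count. Requesting 64 KB alignment
 * means passing the exponent 16:
 *
 *      kgsl_memdesc_set_align(memdesc, ilog2(SZ_64K));
 *
 * after which kgsl_memdesc_get_pagesize() returns 1 << 16 = 65536.
 */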

/**
 * kgsl_memdesc_usermem_type - return buffer type
 * @memdesc - the memdesc
 *
 * Returns a KGSL_MEM_ENTRY_* value for this buffer, which
 * identifies whether it was allocated by us or imported from
 * another allocator.
 */
static inline unsigned int
kgsl_memdesc_usermem_type(const struct kgsl_memdesc *memdesc)
{
        return MEMFLAGS(memdesc->flags, KGSL_MEMFLAGS_USERMEM_MASK,
                KGSL_MEMFLAGS_USERMEM_SHIFT);
}

/**
 * memdesc_sg_dma() - Turn a dma_addr (from CMA) into a sg table
 * @memdesc: Pointer to the memdesc structure
 * @addr: Physical address from the dma_alloc function
 * @size: Size of the chunk
 *
 * Create a sg table for the contiguous chunk specified by addr and size.
 */
static inline int
memdesc_sg_dma(struct kgsl_memdesc *memdesc,
                phys_addr_t addr, uint64_t size)
{
        int ret;
        struct page *page = phys_to_page(addr);

        memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (memdesc->sgt == NULL)
                return -ENOMEM;

        ret = sg_alloc_table(memdesc->sgt, 1, GFP_KERNEL);
        if (ret) {
                kfree(memdesc->sgt);
                memdesc->sgt = NULL;
                return ret;
        }

        sg_set_page(memdesc->sgt->sgl, page, (size_t) size, 0);
        return 0;
}
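
/*
 * Usage sketch (illustrative only, with assumptions): pair a coherent
 * DMA allocation with memdesc_sg_dma(). This assumes the returned DMA
 * address is also the CPU physical address (true for plain CMA with no
 * IOMMU remapping); `dev` is a hypothetical struct device.
 *
 *      dma_addr_t dma_addr;
 *      void *cpu_addr = dma_alloc_coherent(dev, size, &dma_addr,
 *                      GFP_KERNEL);
 *      int ret = cpu_addr ? memdesc_sg_dma(memdesc, dma_addr, size) :
 *                      -ENOMEM;
 */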

/*
 * kgsl_memdesc_is_global - is this a globally mapped buffer?
 * @memdesc: the memdesc
 *
 * Returns nonzero if this is a global mapping, 0 otherwise
 */
static inline int kgsl_memdesc_is_global(const struct kgsl_memdesc *memdesc)
{
        return (memdesc->priv & KGSL_MEMDESC_GLOBAL) != 0;
}

/*
 * kgsl_memdesc_is_secured - is this a secure buffer?
 * @memdesc: the memdesc
 *
 * Returns true if this is a secure mapping, false otherwise
 */
static inline bool kgsl_memdesc_is_secured(const struct kgsl_memdesc *memdesc)
{
        return memdesc && (memdesc->priv & KGSL_MEMDESC_SECURE);
}

/*
 * kgsl_memdesc_has_guard_page - is the last page a guard page?
 * @memdesc - the memdesc
 *
 * Returns nonzero if there is a guard page, 0 otherwise
 */
static inline int
kgsl_memdesc_has_guard_page(const struct kgsl_memdesc *memdesc)
{
        return (memdesc->priv & KGSL_MEMDESC_GUARD_PAGE) != 0;
}

/*
 * kgsl_memdesc_guard_page_size - returns guard page size
 * @memdesc - the memdesc
 *
 * Returns guard page size
 */
static inline uint64_t
kgsl_memdesc_guard_page_size(const struct kgsl_memdesc *memdesc)
{
        if (!kgsl_memdesc_has_guard_page(memdesc))
                return 0;

        if (kgsl_memdesc_is_secured(memdesc)) {
                if (memdesc->pagetable != NULL &&
                                memdesc->pagetable->mmu != NULL)
                        return memdesc->pagetable->mmu->secure_align_mask + 1;
        }

        return PAGE_SIZE;
}

/*
 * kgsl_memdesc_use_cpu_map - use the same virtual mapping on CPU and GPU?
 * @memdesc - the memdesc
 */
static inline int
kgsl_memdesc_use_cpu_map(const struct kgsl_memdesc *memdesc)
{
        return (memdesc->flags & KGSL_MEMFLAGS_USE_CPU_MAP) != 0;
}

/*
 * kgsl_memdesc_footprint - get the size of the mmap region
 * @memdesc - the memdesc
 *
 * The entire memdesc must be mapped. Additionally, if the
 * CPU mapping is going to be mirrored, there must be room
 * for the guard page to be mapped so that the address spaces
 * match up.
 */
static inline uint64_t
kgsl_memdesc_footprint(const struct kgsl_memdesc *memdesc)
{
        return memdesc->size + kgsl_memdesc_guard_page_size(memdesc);
}
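
/*
 * Worked example (illustrative only): a 4096-byte non-secure buffer
 * with KGSL_MEMDESC_GUARD_PAGE set has a footprint of
 * 4096 + PAGE_SIZE = 8192 bytes on a 4 KB-page kernel, so the mmap
 * region spans two pages even though only one is backed by real memory.
 */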

/*
 * kgsl_allocate_global() - Allocate GPU accessible memory that will be global
 * across all processes
 * @device: The device pointer to which the memdesc belongs
 * @memdesc: Pointer to a KGSL memory descriptor for the memory allocation
 * @size: size of the allocation
 * @flags: Allocation flags that control how the memory is mapped
 * @priv: Priv flags that control memory attributes
 * @name: Name used to identify the allocation in the global list
 *
 * Allocate contiguous memory for internal use and add the allocation to the
 * list of global pagetable entries that will be mapped at the same address in
 * all pagetables. This is used for device-wide GPU allocations such as
 * ringbuffers.
 */
static inline int kgsl_allocate_global(struct kgsl_device *device,
        struct kgsl_memdesc *memdesc, uint64_t size, uint64_t flags,
        unsigned int priv, const char *name)
{
        int ret;

        memdesc->flags = flags;
        memdesc->priv = priv;

        if (((memdesc->priv & KGSL_MEMDESC_CONTIG) != 0) ||
                (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE)) {
                ret = kgsl_sharedmem_alloc_contig(device, memdesc,
                        (size_t) size);
        } else {
                ret = kgsl_sharedmem_page_alloc_user(memdesc, (size_t) size);
                if (ret == 0) {
                        if (kgsl_memdesc_map(memdesc) == NULL)
                                ret = -ENOMEM;
                }
        }

        if (ret == 0)
                kgsl_mmu_add_global(device, memdesc, name);

        return ret;
}
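
/*
 * Usage sketch (illustrative only): allocate a device-wide buffer at
 * probe time and release it at teardown with kgsl_free_global() (below).
 * The zero flags/priv values and the "scratch" name are placeholders.
 *
 *      static struct kgsl_memdesc md;
 *
 *      ret = kgsl_allocate_global(device, &md, PAGE_SIZE, 0, 0,
 *                      "scratch");
 *      ...
 *      kgsl_free_global(device, &md);
 */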

/**
 * kgsl_free_global() - Free a device-wide GPU allocation and remove it from
 * the global pagetable entry list
 *
 * @device: Pointer to the device
 * @memdesc: Pointer to the GPU memory descriptor to free
 *
 * Remove the specified memory descriptor from the global pagetable entry list
 * and free it
 */
static inline void kgsl_free_global(struct kgsl_device *device,
                struct kgsl_memdesc *memdesc)
{
        kgsl_mmu_remove_global(device, memdesc);
        kgsl_sharedmem_free(memdesc);
}

void kgsl_sharedmem_set_noretry(bool val);
bool kgsl_sharedmem_get_noretry(void);

/**
 * kgsl_alloc_sgt_from_pages() - Allocate a sg table
 *
 * @m: memory descriptor of the allocation
 *
 * Allocate and return a pointer to a sg table built from the pages of
 * the allocation, or an ERR_PTR on failure
 */
static inline struct sg_table *kgsl_alloc_sgt_from_pages(
                struct kgsl_memdesc *m)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (sgt == NULL)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table_from_pages(sgt, m->pages, m->page_count, 0,
                m->size, GFP_KERNEL);
        if (ret) {
                kfree(sgt);
                return ERR_PTR(ret);
        }

        return sgt;
}

/**
 * kgsl_free_sgt() - Free a sg table structure
 *
 * @sgt: sg table pointer to be freed
 *
 * Free the entries allocated in the sg table and then
 * free the sgt structure itself
 */
static inline void kgsl_free_sgt(struct sg_table *sgt)
{
        if (sgt != NULL) {
                sg_free_table(sgt);
                kfree(sgt);
        }
}
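
/*
 * Usage sketch (illustrative only): the two helpers above pair up. Note
 * the ERR_PTR() convention on the allocation side.
 *
 *      struct sg_table *sgt = kgsl_alloc_sgt_from_pages(memdesc);
 *
 *      if (IS_ERR(sgt))
 *              return PTR_ERR(sgt);
 *      ...
 *      kgsl_free_sgt(sgt);
 */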

/**
 * kgsl_get_page_size() - Get supported pagesize
 * @size: Size of the allocation
 * @align: Requested alignment, as a power-of-2 exponent
 *
 * Return the largest supported pagesize that fits the size and alignment
 */
#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
static inline int kgsl_get_page_size(size_t size, unsigned int align)
{
        if (align >= ilog2(SZ_1M) && size >= SZ_1M)
                return SZ_1M;
        else if (align >= ilog2(SZ_64K) && size >= SZ_64K)
                return SZ_64K;
        else if (align >= ilog2(SZ_8K) && size >= SZ_8K)
                return SZ_8K;
        else
                return PAGE_SIZE;
}
#else
static inline int kgsl_get_page_size(size_t size, unsigned int align)
{
        return PAGE_SIZE;
}
#endif

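/*
 * Worked example (illustrative only, CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
 * unset): a 2 MB request with a 1 MB alignment exponent is backed with
 * 1 MB pages, while the same size at the default 4 KB alignment falls
 * through to PAGE_SIZE chunks.
 *
 *      kgsl_get_page_size(SZ_2M, 20);  selects SZ_1M (ilog2(SZ_1M) == 20)
 *      kgsl_get_page_size(SZ_2M, 12);  selects PAGE_SIZE
 */
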
#endif /* __KGSL_SHAREDMEM_H */