blob: 5466a49263b65fd276e60f92e7bfbd79e7335a5e [file] [log] [blame]
Lynus Vazeb7af682017-04-17 18:36:01 +05301/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
Shrenuj Bansala419c792016-10-20 14:05:11 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#ifndef __KGSL_SHAREDMEM_H
14#define __KGSL_SHAREDMEM_H
15
16#include <linux/dma-mapping.h>
17
18#include "kgsl_mmu.h"
19
20struct kgsl_device;
21struct kgsl_process_private;
22
23#define KGSL_CACHE_OP_INV 0x01
24#define KGSL_CACHE_OP_FLUSH 0x02
25#define KGSL_CACHE_OP_CLEAN 0x03
26
27int kgsl_sharedmem_alloc_contig(struct kgsl_device *device,
28 struct kgsl_memdesc *memdesc,
29 uint64_t size);
30
31void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);
32
33int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
34 uint32_t *dst,
35 uint64_t offsetbytes);
36
37int kgsl_sharedmem_writel(struct kgsl_device *device,
38 const struct kgsl_memdesc *memdesc,
39 uint64_t offsetbytes,
40 uint32_t src);
41
42int kgsl_sharedmem_readq(const struct kgsl_memdesc *memdesc,
43 uint64_t *dst,
44 uint64_t offsetbytes);
45
46int kgsl_sharedmem_writeq(struct kgsl_device *device,
47 const struct kgsl_memdesc *memdesc,
48 uint64_t offsetbytes,
49 uint64_t src);
50
51int kgsl_sharedmem_set(struct kgsl_device *device,
52 const struct kgsl_memdesc *memdesc,
53 uint64_t offsetbytes, unsigned int value,
54 uint64_t sizebytes);
55
56int kgsl_cache_range_op(struct kgsl_memdesc *memdesc,
57 uint64_t offset, uint64_t size,
58 unsigned int op);
59
60void kgsl_process_init_sysfs(struct kgsl_device *device,
61 struct kgsl_process_private *private);
62void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
63
64int kgsl_sharedmem_init_sysfs(void);
65void kgsl_sharedmem_uninit_sysfs(void);
66
67int kgsl_allocate_user(struct kgsl_device *device,
68 struct kgsl_memdesc *memdesc,
69 uint64_t size, uint64_t flags);
70
71void kgsl_get_memory_usage(char *str, size_t len, uint64_t memflags);
72
73int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
74 uint64_t size);
75
/*
 * MEMFLAGS - extract a bitfield from a flags word
 * @_flags: flags value to read
 * @_mask: mask selecting the field
 * @_shift: bit position of the field's least significant bit
 *
 * Returns the selected field shifted down to bit 0, as an unsigned int.
 */
#define MEMFLAGS(_flags, _mask, _shift) \
	((unsigned int) (((_flags) & (_mask)) >> (_shift)))
78
79/*
80 * kgsl_memdesc_get_align - Get alignment flags from a memdesc
81 * @memdesc - the memdesc
82 *
83 * Returns the alignment requested, as power of 2 exponent.
84 */
85static inline int
86kgsl_memdesc_get_align(const struct kgsl_memdesc *memdesc)
87{
88 return MEMFLAGS(memdesc->flags, KGSL_MEMALIGN_MASK,
89 KGSL_MEMALIGN_SHIFT);
90}
91
/*
 * kgsl_memdesc_get_pagesize - Get pagesize based on alignment
 * @memdesc - the memdesc
 *
 * Returns the pagesize based on memdesc alignment
 */
static inline int
kgsl_memdesc_get_pagesize(const struct kgsl_memdesc *memdesc)
{
	int align_log2 = kgsl_memdesc_get_align(memdesc);

	return 1 << align_log2;
}
103
104/*
105 * kgsl_memdesc_get_cachemode - Get cache mode of a memdesc
106 * @memdesc: the memdesc
107 *
108 * Returns a KGSL_CACHEMODE* value.
109 */
110static inline int
111kgsl_memdesc_get_cachemode(const struct kgsl_memdesc *memdesc)
112{
113 return MEMFLAGS(memdesc->flags, KGSL_CACHEMODE_MASK,
114 KGSL_CACHEMODE_SHIFT);
115}
116
117static inline unsigned int
118kgsl_memdesc_get_memtype(const struct kgsl_memdesc *memdesc)
119{
120 return MEMFLAGS(memdesc->flags, KGSL_MEMTYPE_MASK,
121 KGSL_MEMTYPE_SHIFT);
122}
123/*
124 * kgsl_memdesc_set_align - Set alignment flags of a memdesc
125 * @memdesc - the memdesc
126 * @align - alignment requested, as a power of 2 exponent.
127 */
128static inline int
129kgsl_memdesc_set_align(struct kgsl_memdesc *memdesc, unsigned int align)
130{
131 if (align > 32)
132 align = 32;
133
Lynus Vazeb7af682017-04-17 18:36:01 +0530134 memdesc->flags &= ~(uint64_t)KGSL_MEMALIGN_MASK;
135 memdesc->flags |= (uint64_t)((align << KGSL_MEMALIGN_SHIFT) &
136 KGSL_MEMALIGN_MASK);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700137 return 0;
138}
139
140/**
141 * kgsl_memdesc_usermem_type - return buffer type
142 * @memdesc - the memdesc
143 *
144 * Returns a KGSL_MEM_ENTRY_* value for this buffer, which
145 * identifies if was allocated by us, or imported from
146 * another allocator.
147 */
148static inline unsigned int
149kgsl_memdesc_usermem_type(const struct kgsl_memdesc *memdesc)
150{
151 return MEMFLAGS(memdesc->flags, KGSL_MEMFLAGS_USERMEM_MASK,
152 KGSL_MEMFLAGS_USERMEM_SHIFT);
153}
154
/**
 * memdesc_sg_dma() - Turn a dma_addr (from CMA) into a sg table
 * @memdesc: Pointer to the memdesc structure
 * @addr: Physical address from the dma_alloc function
 * @size: Size of the chunk
 *
 * Create a sg table for the contiguous chunk specified by addr and size.
 * The table is stored in memdesc->sgt and owned by the memdesc.
 *
 * Return: 0 on success, -ENOMEM if the table could not be allocated, or
 * the error from sg_alloc_table().
 */
static inline int
memdesc_sg_dma(struct kgsl_memdesc *memdesc,
		phys_addr_t addr, uint64_t size)
{
	int ret;
	struct page *page = phys_to_page(addr);

	memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (memdesc->sgt == NULL)
		return -ENOMEM;

	ret = sg_alloc_table(memdesc->sgt, 1, GFP_KERNEL);
	if (ret) {
		/* Don't leave a dangling pointer behind on failure */
		kfree(memdesc->sgt);
		memdesc->sgt = NULL;
		return ret;
	}

	/* A single entry covers the entire contiguous chunk */
	sg_set_page(memdesc->sgt->sgl, page, (size_t) size, 0);
	return 0;
}
184
185/*
186 * kgsl_memdesc_is_global - is this a globally mapped buffer?
187 * @memdesc: the memdesc
188 *
189 * Returns nonzero if this is a global mapping, 0 otherwise
190 */
191static inline int kgsl_memdesc_is_global(const struct kgsl_memdesc *memdesc)
192{
193 return (memdesc->priv & KGSL_MEMDESC_GLOBAL) != 0;
194}
195
196/*
197 * kgsl_memdesc_is_secured - is this a secure buffer?
198 * @memdesc: the memdesc
199 *
200 * Returns true if this is a secure mapping, false otherwise
201 */
202static inline bool kgsl_memdesc_is_secured(const struct kgsl_memdesc *memdesc)
203{
204 return memdesc && (memdesc->priv & KGSL_MEMDESC_SECURE);
205}
206
207/*
208 * kgsl_memdesc_has_guard_page - is the last page a guard page?
209 * @memdesc - the memdesc
210 *
211 * Returns nonzero if there is a guard page, 0 otherwise
212 */
213static inline int
214kgsl_memdesc_has_guard_page(const struct kgsl_memdesc *memdesc)
215{
216 return (memdesc->priv & KGSL_MEMDESC_GUARD_PAGE) != 0;
217}
218
219/*
220 * kgsl_memdesc_guard_page_size - returns guard page size
221 * @memdesc - the memdesc
222 *
223 * Returns guard page size
224 */
225static inline uint64_t
226kgsl_memdesc_guard_page_size(const struct kgsl_memdesc *memdesc)
227{
228 if (!kgsl_memdesc_has_guard_page(memdesc))
229 return 0;
230
231 if (kgsl_memdesc_is_secured(memdesc)) {
232 if (memdesc->pagetable != NULL &&
233 memdesc->pagetable->mmu != NULL)
234 return memdesc->pagetable->mmu->secure_align_mask + 1;
235 }
236
237 return PAGE_SIZE;
238}
239
240/*
241 * kgsl_memdesc_use_cpu_map - use the same virtual mapping on CPU and GPU?
242 * @memdesc - the memdesc
243 */
244static inline int
245kgsl_memdesc_use_cpu_map(const struct kgsl_memdesc *memdesc)
246{
247 return (memdesc->flags & KGSL_MEMFLAGS_USE_CPU_MAP) != 0;
248}
249
250/*
251 * kgsl_memdesc_footprint - get the size of the mmap region
252 * @memdesc - the memdesc
253 *
254 * The entire memdesc must be mapped. Additionally if the
255 * CPU mapping is going to be mirrored, there must be room
256 * for the guard page to be mapped so that the address spaces
257 * match up.
258 */
259static inline uint64_t
260kgsl_memdesc_footprint(const struct kgsl_memdesc *memdesc)
261{
262 return memdesc->size + kgsl_memdesc_guard_page_size(memdesc);
263}
264
265/*
266 * kgsl_allocate_global() - Allocate GPU accessible memory that will be global
267 * across all processes
268 * @device: The device pointer to which the memdesc belongs
269 * @memdesc: Pointer to a KGSL memory descriptor for the memory allocation
270 * @size: size of the allocation
271 * @flags: Allocation flags that control how the memory is mapped
272 * @priv: Priv flags that controls memory attributes
273 *
274 * Allocate contiguous memory for internal use and add the allocation to the
275 * list of global pagetable entries that will be mapped at the same address in
276 * all pagetables. This is for use for device wide GPU allocations such as
277 * ringbuffers.
278 */
279static inline int kgsl_allocate_global(struct kgsl_device *device,
280 struct kgsl_memdesc *memdesc, uint64_t size, uint64_t flags,
281 unsigned int priv, const char *name)
282{
283 int ret;
284
285 memdesc->flags = flags;
286 memdesc->priv = priv;
287
288 if (((memdesc->priv & KGSL_MEMDESC_CONTIG) != 0) ||
289 (kgsl_mmu_get_mmutype(device) == KGSL_MMU_TYPE_NONE))
290 ret = kgsl_sharedmem_alloc_contig(device, memdesc,
291 (size_t) size);
292 else {
293 ret = kgsl_sharedmem_page_alloc_user(memdesc, (size_t) size);
294 if (ret == 0) {
Lynus Vaz464c22e2017-06-20 20:32:45 +0530295 if (kgsl_memdesc_map(memdesc) == NULL) {
296 kgsl_sharedmem_free(memdesc);
Shrenuj Bansala419c792016-10-20 14:05:11 -0700297 ret = -ENOMEM;
Lynus Vaz464c22e2017-06-20 20:32:45 +0530298 }
Shrenuj Bansala419c792016-10-20 14:05:11 -0700299 }
300 }
301
302 if (ret == 0)
303 kgsl_mmu_add_global(device, memdesc, name);
304
305 return ret;
306}
307
/**
 * kgsl_free_global() - Free a device wide GPU allocation and remove it from the
 * global pagetable entry list
 *
 * @device: Pointer to the device
 * @memdesc: Pointer to the GPU memory descriptor to free
 *
 * Remove the specific memory descriptor from the global pagetable entry list
 * and free it
 */
static inline void kgsl_free_global(struct kgsl_device *device,
		struct kgsl_memdesc *memdesc)
{
	/* Remove from global pagetables before the backing memory goes away */
	kgsl_mmu_remove_global(device, memdesc);
	kgsl_sharedmem_free(memdesc);
}
324
/* Set/query the driver-wide "noretry" flag for shared memory allocations */
void kgsl_sharedmem_set_noretry(bool val);
bool kgsl_sharedmem_get_noretry(void);
327
328/**
329 * kgsl_alloc_sgt_from_pages() - Allocate a sg table
330 *
331 * @memdesc: memory descriptor of the allocation
332 *
333 * Allocate and return pointer to a sg table
334 */
335static inline struct sg_table *kgsl_alloc_sgt_from_pages(
336 struct kgsl_memdesc *m)
337{
338 int ret;
339 struct sg_table *sgt;
340
341 sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
342 if (sgt == NULL)
343 return ERR_PTR(-ENOMEM);
344
345 ret = sg_alloc_table_from_pages(sgt, m->pages, m->page_count, 0,
346 m->size, GFP_KERNEL);
347 if (ret) {
348 kfree(sgt);
349 return ERR_PTR(ret);
350 }
351
352 return sgt;
353}
354
355/**
356 * kgsl_free_sgt() - Free a sg table structure
357 *
358 * @sgt: sg table pointer to be freed
359 *
360 * Free the sg table allocated using sgt and free the
361 * sgt structure itself
362 */
363static inline void kgsl_free_sgt(struct sg_table *sgt)
364{
365 if (sgt != NULL) {
366 sg_free_table(sgt);
367 kfree(sgt);
368 }
369}
370
Hareesh Gunduf32a49f2016-11-21 19:18:29 +0530371/**
372 * kgsl_get_page_size() - Get supported pagesize
373 * @size: Size of the page
374 * @align: Desired alignment of the size
375 *
376 * Return supported pagesize
377 */
378#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
379static inline int kgsl_get_page_size(size_t size, unsigned int align)
380{
381 if (align >= ilog2(SZ_1M) && size >= SZ_1M)
382 return SZ_1M;
383 else if (align >= ilog2(SZ_64K) && size >= SZ_64K)
384 return SZ_64K;
385 else if (align >= ilog2(SZ_8K) && size >= SZ_8K)
386 return SZ_8K;
387 else
388 return PAGE_SIZE;
389}
390#else
391static inline int kgsl_get_page_size(size_t size, unsigned int align)
392{
393 return PAGE_SIZE;
394}
395#endif
396
Shrenuj Bansala419c792016-10-20 14:05:11 -0700397#endif /* __KGSL_SHAREDMEM_H */