/*
 * drivers/gpu/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/ion.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>

enum {
	DI_PARTITION_NUM = 0,
	DI_DOMAIN_NUM = 1,
	DI_MAX,
};

/**
 * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
 * @iova_addr - iommu virtual address
 * @node - rb node to exist in the buffer's tree of iommu mappings
 * @domain_info - contains the partition number and domain number
 *		domain_info[1] = domain number
 *		domain_info[0] = partition number
 * @buffer - back pointer to the ion buffer this mapping belongs to
 * @ref - for reference counting this mapping
 * @mapped_size - size of the iova space mapped
 *		(may not be the same as the buffer size)
 * @flags - iommu domain/partition specific flags.
 *
 * Represents a mapping of one ion buffer to a particular iommu domain
 * and address range. There may exist other mappings of this buffer in
 * different domains or address ranges. All mappings will have the same
 * cacheability and security.
 */
struct ion_iommu_map {
	unsigned long iova_addr;
	struct rb_node node;
	union {
		int domain_info[DI_MAX];
		uint64_t key;
	};
	struct ion_buffer *buffer;
	struct kref ref;
	int mapped_size;
	unsigned long flags;
};

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:		reference count
 * @node:		node in the ion_device buffers tree
 * @dev:		back pointer to the ion_device
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		private data to the buffer representable as
 *			a void *
 * @priv_phys:		private data to the buffer representable as
 *			an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock:		protects the buffer's cnt fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt:		number of times the buffer is mapped for dma
 * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
 * @dirty:		per-page bitmap used when userspace mappings are
 *			faulted in
 * @vmas:		list of vma's mapping this buffer
 * @iommu_map_cnt:	number of iommu mappings of this buffer
 * @iommu_maps:		rb tree of iommu mappings of this buffer
 * @marked:		debug marker used when checking for leaked buffers
 */
struct ion_buffer {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	int dmap_cnt;
	struct sg_table *sg_table;
	unsigned long *dirty;
	struct list_head vmas;
	unsigned int iommu_map_cnt;
	struct rb_root iommu_maps;
	int marked;
};

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @phys:		get physical address of a buffer (only define for
 *			physically contiguous heaps)
 * @map_dma:		map the memory for dma to a scatterlist
 * @unmap_dma:		unmap the memory for dma
 * @map_kernel:		map memory to the kernel
 * @unmap_kernel:	unmap memory from the kernel
 * @map_user:		map memory to userspace
 * @unmap_user:		unmap memory from userspace
 * @cache_op:		perform a cache maintenance operation on a buffer
 * @map_iommu:		map the buffer into an iommu domain/partition
 * @unmap_iommu:	unmap the buffer from an iommu domain/partition
 * @print_debug:	print heap specific debug info to a seq_file
 * @secure_heap:	content protect the heap as a whole
 * @unsecure_heap:	remove content protection from the heap as a whole
 * @secure_buffer:	content protect a single buffer
 * @unsecure_buffer:	remove content protection from a single buffer
 */
struct ion_heap_ops {
	int (*allocate) (struct ion_heap *heap,
			 struct ion_buffer *buffer, unsigned long len,
			 unsigned long align, unsigned long flags);
	void (*free) (struct ion_buffer *buffer);
	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
		     ion_phys_addr_t *addr, size_t *len);
	struct sg_table *(*map_dma) (struct ion_heap *heap,
				     struct ion_buffer *buffer);
	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
			 struct vm_area_struct *vma);
	void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
	int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset,
			unsigned int length, unsigned int cmd);
	int (*map_iommu)(struct ion_buffer *buffer,
			 struct ion_iommu_map *map_data,
			 unsigned int domain_num,
			 unsigned int partition_num,
			 unsigned long align,
			 unsigned long iova_length,
			 unsigned long flags);
	void (*unmap_iommu)(struct ion_iommu_map *data);
	int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
			   const struct rb_root *mem_map);
	int (*secure_heap)(struct ion_heap *heap, int version, void *data);
	int (*unsecure_heap)(struct ion_heap *heap, int version, void *data);
	int (*secure_buffer)(struct ion_buffer *buffer, int version,
			     void *data, int flags);
	int (*unsecure_buffer)(struct ion_buffer *buffer, int force_unsecure);
};
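
/*
 * Example: a minimal sketch of how a heap implementation might wire up
 * these ops. The my_heap_* functions are hypothetical and assumed to
 * match the signatures above; only the core ops are shown. Optional ops
 * such as .phys are left NULL on heaps that cannot support them, and
 * callers check for a NULL op before invoking it.
 *
 *	static struct ion_heap_ops my_heap_ops = {
 *		.allocate = my_heap_allocate,
 *		.free = my_heap_free,
 *		.map_dma = my_heap_map_dma,
 *		.unmap_dma = my_heap_unmap_dma,
 *		.map_kernel = my_heap_map_kernel,
 *		.unmap_kernel = my_heap_unmap_kernel,
 *		.map_user = my_heap_map_user,
 *	};
 */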

/**
 * struct ion_heap - represents a heap in the system
 * @node:		rb node to put the heap on the device's tree of heaps
 * @dev:		back pointer to the ion_device
 * @type:		type of heap
 * @ops:		ops struct as above
 * @id:			id of heap, also indicates priority of this heap when
 *			allocating. These are specified by platform data and
 *			MUST be unique
 * @name:		used for debugging
 * @priv:		private heap data
 *
 * Represents a pool of memory from which buffers can be made. In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct rb_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	int id;
	const char *name;
	void *priv;
};

/**
 * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
 * @buffer:	buffer
 *
 * Indicates whether userspace mappings of this buffer will be faulted
 * in; this can affect how buffers are allocated from the heap.
 */
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);

/**
 * struct mem_map_data - represents information about the memory map for a heap
 * @node:		rb node used to store in the tree of mem_map_data
 * @addr:		start address of the memory region
 * @addr_end:		end address of the memory region
 * @size:		size of the memory region
 * @client_name:	name of the client who owns this buffer
 */
struct mem_map_data {
	struct rb_node node;
	ion_phys_addr_t addr;
	ion_phys_addr_t addr_end;
	unsigned long size;
	const char *client_name;
};

#define iommu_map_domain(__m)		((__m)->domain_info[1])
#define iommu_map_partition(__m)	((__m)->domain_info[0])
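
/*
 * Example: a sketch of how the accessors above recover the domain and
 * partition of an existing mapping (data is assumed to be a valid
 * struct ion_iommu_map, e.g. the one handed to unmap_iommu):
 *
 *	unsigned int domain_num = iommu_map_domain(data);
 *	unsigned int partition_num = iommu_map_partition(data);
 *
 * Because domain_info is unioned with a 64-bit key, both values can
 * also be compared at once when looking a mapping up in the buffer's
 * rb tree of iommu mappings.
 */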

/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl:	arch specific ioctl function if applicable
 *
 * returns a valid device or an ERR_PTR() encoded error on failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg));

/**
 * ion_device_destroy - frees a device and its resources
 * @dev:	the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev:	the device
 * @heap:	the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
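
/*
 * Example: a sketch of typical probe-time usage, assuming a
 * hypothetical pdata describing the platform's heaps (the names are
 * illustrative only):
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	struct ion_heap *heap;
 *
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	heap = ion_heap_create(&pdata->heaps[0]);
 *	if (!IS_ERR_OR_NULL(heap))
 *		ion_device_add_heap(idev, heap);
 */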

/**
 * Functions for creating and destroying the built in ion heaps.
 * Architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
void ion_iommu_heap_destroy(struct ion_heap *);

struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *);
void ion_cp_heap_destroy(struct ion_heap *);

struct ion_heap *ion_reusable_heap_create(struct ion_platform_heap *);
void ion_reusable_heap_destroy(struct ion_heap *);

/**
 * Kernel api to allocate/free from carveout -- used when carveout is
 * used to back an architecture specific custom heap.
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
				      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size);
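
/*
 * Example: a sketch of how a custom heap's allocate/free ops might use
 * the carveout allocator (the surrounding allocate op is hypothetical):
 *
 *	ion_phys_addr_t paddr = ion_carveout_allocate(heap, len, align);
 *
 *	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
 *		return -ENOMEM;
 *	buffer->priv_phys = paddr;
 *	...
 *	ion_carveout_free(heap, buffer->priv_phys, buffer->size);
 *
 * ION_CARVEOUT_ALLOCATE_FAIL (defined below) signals failure because 0
 * can be a valid physical address.
 */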

#ifdef CONFIG_CMA
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
void ion_cma_heap_destroy(struct ion_heap *);

struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *);
void ion_secure_cma_heap_destroy(struct ion_heap *);
#endif

struct ion_heap *msm_get_contiguous_heap(void);

/**
 * The carveout/cp heaps return physical addresses; since 0 may be a valid
 * physical address, this value is used to indicate that allocation failed.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL -1
#define ION_CP_ALLOCATE_FAIL -1

/**
 * The reserved heap returns physical addresses; since 0 may be a valid
 * physical address, this value is used to indicate that allocation failed.
 */
#define ION_RESERVED_ALLOCATE_FAIL -1

/**
 * ion_map_fmem_buffer - map fmem allocated memory into the kernel
 * @buffer - buffer to map
 * @phys_base - physical base of the heap
 * @virt_base - virtual base of the heap
 * @flags - flags for the heap
 *
 * Map fmem allocated memory into the kernel address space. This
 * is designed to be used by other heaps that need fmem behavior.
 * The virtual range must be pre-allocated.
 */
void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
			  void *virt_base, unsigned long flags);

/**
 * ion_do_cache_op - do cache operations.
 *
 * @client - pointer to ION client.
 * @handle - pointer to buffer handle.
 * @uaddr - virtual address to operate on.
 * @offset - offset from physical address.
 * @len - length of data to do cache operation on.
 * @cmd - cache operation to perform:
 *		ION_IOC_CLEAN_CACHES
 *		ION_IOC_INV_CACHES
 *		ION_IOC_CLEAN_INV_CACHES
 *
 * Returns 0 on success
 */
int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
		    void *uaddr, unsigned long offset, unsigned long len,
		    unsigned int cmd);
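
/*
 * Example: a sketch of cleaning the cache for a kernel-mapped buffer
 * before handing it to a device. client, handle, vaddr and len are
 * assumed to come from the client api in linux/ion.h:
 *
 *	int ret = ion_do_cache_op(client, handle, vaddr, 0, len,
 *				  ION_IOC_CLEAN_CACHES);
 *	if (ret)
 *		pr_err("cache clean failed: %d\n", ret);
 */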

void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
			  unsigned long *size);

void ion_mem_map_show(struct ion_heap *heap);

int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
		      int version, void *data, int flags);

int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle);

int ion_heap_allow_secure_allocation(enum ion_heap_type type);

int ion_heap_allow_heap_secure(enum ion_heap_type type);

int ion_heap_allow_handle_secure(enum ion_heap_type type);

/**
 * ion_create_chunked_sg_table - helper function to create sg table
 *				 with specified chunk size
 * @buffer_base:	The starting address used for the sg dma address
 * @chunk_size:		The size of each entry in the sg table
 * @total_size:		The total size of the sg table (i.e. the sum of the
 *			entries). This will be rounded up to the nearest
 *			multiple of `chunk_size'
 */
struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					     size_t chunk_size,
					     size_t total_size);
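
/*
 * Example: a sketch describing an 8K contiguous region as two 4K
 * chunks (assuming ERR_PTR-style error returns; the values are
 * illustrative):
 *
 *	struct sg_table *table =
 *		ion_create_chunked_sg_table(buffer_base, SZ_4K, SZ_8K);
 *
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *
 * Since total_size is rounded up to a multiple of chunk_size, a
 * total_size of SZ_4K + 1 would also yield two entries.
 */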
#endif /* _ION_PRIV_H */