/*
 * drivers/gpu/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/ion.h>
#include <linux/iommu.h>

struct ion_mapping;

struct ion_dma_mapping {
	struct kref ref;
	struct scatterlist *sglist;
};

struct ion_kernel_mapping {
	struct kref ref;
	void *vaddr;
};

enum {
	DI_PARTITION_NUM = 0,
	DI_DOMAIN_NUM = 1,
	DI_MAX,
};

/**
 * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
 * @iova_addr - iommu virtual address
 * @node - rb node to exist in the buffer's tree of iommu mappings
 * @domain_info - contains the partition number and domain number
 *		domain_info[1] = domain number
 *		domain_info[0] = partition number
 * @buffer - back pointer to the buffer this mapping belongs to
 * @ref - for reference counting this mapping
 * @mapped_size - size of the iova space mapped
 *		(may not be the same as the buffer size)
 * @flags - iommu domain/partition specific flags.
 *
 * Represents a mapping of one ion buffer to a particular iommu domain
 * and address range. There may exist other mappings of this buffer in
 * different domains or address ranges. All mappings will have the same
 * cacheability and security.
 */
struct ion_iommu_map {
	unsigned long iova_addr;
	struct rb_node node;
	union {
		int domain_info[DI_MAX];
		uint64_t key;
	};
	struct ion_buffer *buffer;
	struct kref ref;
	int mapped_size;
	unsigned long flags;
};
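
/*
 * Illustrative sketch (not part of the original header): because
 * domain_info[] and key share storage, an rb-tree walk over
 * buffer->iommu_maps can compare the partition and domain numbers in
 * a single 64-bit comparison. A hypothetical lookup helper:
 *
 *	static struct ion_iommu_map *ion_iommu_lookup(
 *			struct ion_buffer *buffer, uint64_t key)
 *	{
 *		struct rb_node *n = buffer->iommu_maps.rb_node;
 *
 *		while (n) {
 *			struct ion_iommu_map *entry =
 *				rb_entry(n, struct ion_iommu_map, node);
 *
 *			if (key < entry->key)
 *				n = n->rb_left;
 *			else if (key > entry->key)
 *				n = n->rb_right;
 *			else
 *				return entry;
 *		}
 *		return NULL;
 *	}
 */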

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:		reference count
 * @node:		node in the ion_device buffers tree
 * @dev:		back pointer to the ion_device
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		private data to the buffer representable as
 *			a void *
 * @priv_phys:		private data to the buffer representable as
 *			an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock:		protects the buffer's cnt fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt:		number of times the buffer is mapped for dma
 * @sglist:		the scatterlist for the buffer if dmap_cnt is not zero
 * @umap_cnt:		number of times the buffer is mapped into userspace
 * @iommu_map_cnt:	number of iommu mappings of this buffer
 * @iommu_maps:		rb tree of the buffer's iommu mappings
 *			(struct ion_iommu_map)
 * @marked:		used by the buffer leak debug check
 */
struct ion_buffer {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	int dmap_cnt;
	struct scatterlist *sglist;
	int umap_cnt;
	unsigned int iommu_map_cnt;
	struct rb_root iommu_maps;
	int marked;
};
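
/*
 * Illustrative sketch (not part of the original header): buffer->lock
 * serializes updates to the mapping counters, so a kernel-map path in
 * the core might look roughly like this (error handling omitted):
 *
 *	mutex_lock(&buffer->lock);
 *	if (!buffer->kmap_cnt++)
 *		buffer->vaddr = buffer->heap->ops->map_kernel(
 *				buffer->heap, buffer, flags);
 *	mutex_unlock(&buffer->lock);
 */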

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @phys:		get physical address of a buffer (only defined on
 *			physically contiguous heaps)
 * @map_dma:		map the memory for dma to a scatterlist
 * @unmap_dma:		unmap the memory for dma
 * @map_kernel:		map memory into the kernel
 * @unmap_kernel:	unmap memory from the kernel
 * @map_user:		map memory into userspace
 * @unmap_user:		unmap memory from userspace
 * @cache_op:		perform a cache maintenance operation on a buffer
 * @map_iommu:		map the buffer into an iommu domain/partition
 * @unmap_iommu:	unmap the buffer from an iommu domain/partition
 * @print_debug:	print heap specific debug information to a seq_file
 * @secure_heap:	protect the heap's memory (content protection)
 * @unsecure_heap:	remove protection from the heap's memory
 */
struct ion_heap_ops {
	int (*allocate) (struct ion_heap *heap,
			 struct ion_buffer *buffer, unsigned long len,
			 unsigned long align, unsigned long flags);
	void (*free) (struct ion_buffer *buffer);
	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
		     ion_phys_addr_t *addr, size_t *len);
	struct scatterlist *(*map_dma) (struct ion_heap *heap,
					struct ion_buffer *buffer);
	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer,
			      unsigned long flags);
	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
			 struct vm_area_struct *vma, unsigned long flags);
	void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
	int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset,
			unsigned int length, unsigned int cmd);
	int (*map_iommu)(struct ion_buffer *buffer,
			 struct ion_iommu_map *map_data,
			 unsigned int domain_num,
			 unsigned int partition_num,
			 unsigned long align,
			 unsigned long iova_length,
			 unsigned long flags);
	void (*unmap_iommu)(struct ion_iommu_map *data);
	int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
			   const struct rb_root *mem_map);
	int (*secure_heap)(struct ion_heap *heap);
	int (*unsecure_heap)(struct ion_heap *heap);
};
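
/*
 * Illustrative sketch (not part of the original header): a heap only
 * wires up the callbacks it supports; a hypothetical physically
 * contiguous heap might declare its ops table like this:
 *
 *	static struct ion_heap_ops example_heap_ops = {
 *		.allocate	= example_heap_allocate,
 *		.free		= example_heap_free,
 *		.phys		= example_heap_phys,
 *		.map_dma	= example_heap_map_dma,
 *		.unmap_dma	= example_heap_unmap_dma,
 *		.map_kernel	= example_heap_map_kernel,
 *		.unmap_kernel	= example_heap_unmap_kernel,
 *		.map_user	= example_heap_map_user,
 *	};
 *
 * Unimplemented callbacks are simply left NULL.
 */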

/**
 * struct ion_heap - represents a heap in the system
 * @node:		rb node to put the heap on the device's tree of heaps
 * @dev:		back pointer to the ion_device
 * @type:		type of heap
 * @ops:		ops struct as above
 * @id:			id of heap, also indicates priority of this heap when
 *			allocating. These are specified by platform data and
 *			MUST be unique
 * @name:		used for debugging
 *
 * Represents a pool of memory from which buffers can be made. In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct rb_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	int id;
	const char *name;
};

/**
 * struct mem_map_data - represents information about the memory map for a heap
 * @node:		rb node used to store in the tree of mem_map_data
 * @addr:		start address of the memory region
 * @addr_end:		end address of the memory region
 * @size:		size of the memory region
 * @client_name:	name of the client who owns this buffer
 */
struct mem_map_data {
	struct rb_node node;
	unsigned long addr;
	unsigned long addr_end;
	unsigned long size;
	const char *client_name;
};

#define iommu_map_domain(__m)		((__m)->domain_info[1])
#define iommu_map_partition(__m)	((__m)->domain_info[0])
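
/*
 * Illustrative usage (not part of the original header): given a
 * struct ion_iommu_map *data, the accessors above index domain_info
 * consistently with DI_DOMAIN_NUM and DI_PARTITION_NUM:
 *
 *	int domain_num = iommu_map_domain(data);
 *	int partition_num = iommu_map_partition(data);
 */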

/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl:	arch specific ioctl function if applicable
 *
 * returns a valid device on success or an ERR_PTR on failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg));

/**
 * ion_device_destroy - frees a device and its resources
 * @dev:		the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev:		the device
 * @heap:		the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
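
/*
 * Illustrative bring-up sketch (not part of the original header; it
 * assumes the struct ion_platform_data layout from linux/ion.h and
 * uses ion_heap_create(), declared below):
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	int i;
 *
 *	if (IS_ERR_OR_NULL(idev))
 *		return PTR_ERR(idev);
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *		if (!IS_ERR_OR_NULL(heap))
 *			ion_device_add_heap(idev, heap);
 *	}
 */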

/**
 * Functions for creating and destroying the built-in ion heaps.
 * Architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
void ion_iommu_heap_destroy(struct ion_heap *);

struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *);
void ion_cp_heap_destroy(struct ion_heap *);

struct ion_heap *ion_reusable_heap_create(struct ion_platform_heap *);
void ion_reusable_heap_destroy(struct ion_heap *);

/**
 * Kernel API to allocate/free from carveout -- used when carveout is
 * used to back an architecture specific custom heap.
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
				      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size);


struct ion_heap *msm_get_contiguous_heap(void);
/**
 * The carveout/cp heaps return physical addresses; since 0 may be a
 * valid physical address, this value is used to indicate allocation
 * failure.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL -1
#define ION_CP_ALLOCATE_FAIL -1
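
/*
 * Illustrative usage (not part of the original header): callers must
 * compare against the _FAIL sentinel rather than testing for zero:
 *
 *	ion_phys_addr_t paddr = ion_carveout_allocate(heap, size, align);
 *
 *	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
 *		return -ENOMEM;
 */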

/**
 * The reserved heap returns physical addresses; since 0 may be a
 * valid physical address, this value is used to indicate allocation
 * failure.
 */
#define ION_RESERVED_ALLOCATE_FAIL -1

/**
 * ion_map_fmem_buffer - map fmem allocated memory into the kernel
 * @buffer - buffer to map
 * @phys_base - physical base of the heap
 * @virt_base - virtual base of the heap
 * @flags - flags for the heap
 *
 * Map fmem allocated memory into the kernel address space. This
 * is designed to be used by other heaps that need fmem behavior.
 * The virtual range must be pre-allocated.
 */
void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
			  void *virt_base, unsigned long flags);
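
/*
 * Illustrative sketch (not part of the original header; heap->base and
 * heap->reserved_vrange are hypothetical fields of a heap that reserves
 * its virtual range up front; error handling omitted):
 *
 *	void *vaddr = ion_map_fmem_buffer(buffer, heap->base,
 *					  heap->reserved_vrange,
 *					  buffer->flags);
 */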

/**
 * ion_do_cache_op - do cache operations.
 *
 * @client - pointer to ION client.
 * @handle - pointer to buffer handle.
 * @uaddr - virtual address to operate on.
 * @offset - offset from physical address.
 * @len - length of data to do cache operation on.
 * @cmd - cache operation to perform:
 *		ION_IOC_CLEAN_CACHES
 *		ION_IOC_INV_CACHES
 *		ION_IOC_CLEAN_INV_CACHES
 *
 * Returns 0 on success
 */
int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
		    void *uaddr, unsigned long offset, unsigned long len,
		    unsigned int cmd);
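
/*
 * Illustrative usage (not part of the original header; client, handle,
 * uaddr and len are assumed to come from the usual alloc/map flow):
 *
 *	int ret = ion_do_cache_op(client, handle, uaddr, 0, len,
 *				  ION_IOC_CLEAN_CACHES);
 *
 *	if (ret)
 *		pr_err("cache clean failed: %d\n", ret);
 */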

void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
			  unsigned long *size);

void ion_mem_map_show(struct ion_heap *heap);

#endif /* _ION_PRIV_H */