/*
 * drivers/gpu/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/ion.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>	/* struct seq_file, used by ion_heap_ops.print_debug */

struct ion_mapping;

struct ion_dma_mapping {
	struct kref ref;
	struct scatterlist *sglist;
};

struct ion_kernel_mapping {
	struct kref ref;
	void *vaddr;
};

/**
 * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
 * @iova_addr - iommu virtual address
 * @node - rb node to exist in the buffer's tree of iommu mappings
 * @domain_info - contains the partition number and domain number
 *		domain_info[1] = domain number
 *		domain_info[0] = partition number
 * @key - domain_info aliased as a single 64-bit value
 * @buffer - the ion buffer this mapping belongs to
 * @ref - for reference counting this mapping
 * @mapped_size - size of the iova space mapped
 *		(may not be the same as the buffer size)
 *
 * Represents a mapping of one ion buffer to a particular iommu domain
 * and address range. There may exist other mappings of this buffer in
 * different domains or address ranges. All mappings will have the same
 * cacheability and security.
 */
struct ion_iommu_map {
	unsigned long iova_addr;
	struct rb_node node;
	union {
		int domain_info[2];
		uint64_t key;
	};
	struct ion_buffer *buffer;
	struct kref ref;
	int mapped_size;
};
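
/*
 * A minimal illustrative helper (hypothetical, not part of the original
 * API): pack a (domain, partition) pair into the 64-bit value that
 * aliases domain_info in struct ion_iommu_map above.
 */
static inline uint64_t ion_iommu_map_make_key(unsigned int domain_num,
					      unsigned int partition_num)
{
	union {
		int domain_info[2];
		uint64_t key;
	} u;

	/* same layout as struct ion_iommu_map: [1] = domain, [0] = partition */
	u.domain_info[1] = domain_num;
	u.domain_info[0] = partition_num;
	return u.key;
}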

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:		reference count
 * @node:		node in the ion_device buffers tree
 * @dev:		back pointer to the ion_device
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		private data to the buffer representable as
 *			a void *
 * @priv_phys:		private data to the buffer representable as
 *			an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock:		protects the buffers cnt fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt:		number of times the buffer is mapped for dma
 * @sglist:		the scatterlist for the buffer if dmap_cnt is not zero
 * @umap_cnt:		number of times the buffer is mapped to userspace
 * @iommu_map_cnt:	number of iommu mappings of this buffer
 * @iommu_maps:		rb tree of this buffer's iommu mappings
 * @marked:		debug aid used when checking for leaked buffers
 */
struct ion_buffer {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	int dmap_cnt;
	struct scatterlist *sglist;
	int umap_cnt;
	unsigned int iommu_map_cnt;
	struct rb_root iommu_maps;
	int marked;
};

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @phys:		get physical address of a buffer (only defined on
 *			physically contiguous heaps)
 * @map_dma:		map the memory for dma to a scatterlist
 * @unmap_dma:		unmap the memory for dma
 * @map_kernel:		map memory to the kernel
 * @unmap_kernel:	unmap memory from the kernel
 * @map_user:		map memory to userspace
 * @unmap_user:		unmap memory from userspace
 * @cache_op:		perform a cache maintenance operation on a buffer
 * @map_iommu:		map the buffer into an iommu domain and partition
 * @unmap_iommu:	unmap the buffer from the iommu
 * @print_debug:	print heap specific debug information to a seq_file
 * @secure_heap:	protect the heap's memory (content protected heaps)
 * @unsecure_heap:	remove protection from the heap's memory
 */
struct ion_heap_ops {
	int (*allocate) (struct ion_heap *heap,
			 struct ion_buffer *buffer, unsigned long len,
			 unsigned long align, unsigned long flags);
	void (*free) (struct ion_buffer *buffer);
	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
		     ion_phys_addr_t *addr, size_t *len);
	struct scatterlist *(*map_dma) (struct ion_heap *heap,
					struct ion_buffer *buffer);
	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer,
			      unsigned long flags);
	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
			 struct vm_area_struct *vma, unsigned long flags);
	void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
	int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset,
			unsigned int length, unsigned int cmd);
	int (*map_iommu)(struct ion_buffer *buffer,
			 struct ion_iommu_map *map_data,
			 unsigned int domain_num,
			 unsigned int partition_num,
			 unsigned long align,
			 unsigned long iova_length,
			 unsigned long flags);
	void (*unmap_iommu)(struct ion_iommu_map *data);
	int (*print_debug)(struct ion_heap *heap, struct seq_file *s);
	int (*secure_heap)(struct ion_heap *heap);
	int (*unsecure_heap)(struct ion_heap *heap);
};
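
/*
 * Sketch of one op (a hypothetical heap, for illustration only): a
 * physically contiguous heap that records its base address in
 * buffer->priv_phys at allocation time can implement @phys as a
 * straightforward read of the buffer's private data.
 */
static inline int ion_example_contig_phys(struct ion_heap *heap,
					  struct ion_buffer *buffer,
					  ion_phys_addr_t *addr, size_t *len)
{
	*addr = buffer->priv_phys;
	*len = buffer->size;
	return 0;
}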

/**
 * struct ion_heap - represents a heap in the system
 * @node:		rb node to put the heap on the device's tree of heaps
 * @dev:		back pointer to the ion_device
 * @type:		type of heap
 * @ops:		ops struct as above
 * @id:			id of heap, also indicates priority of this heap when
 *			allocating.  These are specified by platform data and
 *			MUST be unique
 * @name:		used for debugging
 *
 * Represents a pool of memory from which buffers can be made.  In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct rb_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	int id;
	const char *name;
};
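
/*
 * Illustrative sketch (assumed core behaviour, not a declaration from
 * this header): the ion core dispatches through heap->ops under the
 * buffer lock, creating the kernel mapping only on the first request.
 */
static inline void *ion_example_kmap(struct ion_buffer *buffer,
				     unsigned long flags)
{
	void *vaddr;

	mutex_lock(&buffer->lock);
	if (!buffer->kmap_cnt)
		buffer->vaddr = buffer->heap->ops->map_kernel(buffer->heap,
							      buffer, flags);
	buffer->kmap_cnt++;
	vaddr = buffer->vaddr;
	mutex_unlock(&buffer->lock);
	return vaddr;
}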

#define iommu_map_domain(__m)		((__m)->domain_info[1])
#define iommu_map_partition(__m)	((__m)->domain_info[0])
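
/*
 * Lookup sketch (a hypothetical helper, assuming each buffer's tree of
 * iommu mappings is ordered by the 64-bit key that aliases domain_info;
 * ion_iommu_map_make_key() is the illustrative packing helper above).
 */
static inline struct ion_iommu_map *
ion_example_find_iommu_map(struct ion_buffer *buffer,
			   unsigned int domain_num,
			   unsigned int partition_num)
{
	uint64_t key = ion_iommu_map_make_key(domain_num, partition_num);
	struct rb_node *n = buffer->iommu_maps.rb_node;

	while (n) {
		struct ion_iommu_map *entry =
			rb_entry(n, struct ion_iommu_map, node);

		if (key < entry->key)
			n = n->rb_left;
		else if (key > entry->key)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}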

/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl:	arch specific ioctl function if applicable
 *
 * returns a valid device on success or an ERR_PTR() encoded error on
 * failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg));

/**
 * ion_device_destroy - free a device and its resources
 * @dev:	the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev:	the device
 * @heap:	the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);

/**
 * functions for creating and destroying the built in ion heaps.
 * architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
void ion_iommu_heap_destroy(struct ion_heap *);

struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *);
void ion_cp_heap_destroy(struct ion_heap *);

struct ion_heap *ion_reusable_heap_create(struct ion_platform_heap *);
void ion_reusable_heap_destroy(struct ion_heap *);
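
/*
 * Registration sketch (a hypothetical probe path; error handling is
 * simplified, and the NULL check stands in for proper IS_ERR() handling,
 * which would need <linux/err.h>):
 */
static inline struct ion_device *
ion_example_probe(struct ion_platform_heap *heaps, int nheaps)
{
	struct ion_device *idev = ion_device_create(NULL);
	int i;

	for (i = 0; i < nheaps; i++) {
		struct ion_heap *heap = ion_heap_create(&heaps[i]);

		if (heap)
			ion_device_add_heap(idev, heap);
	}
	return idev;
}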

/**
 * kernel api to allocate/free from carveout -- used when carveout is
 * used to back an architecture specific custom heap
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
				      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size);

struct ion_heap *msm_get_contiguous_heap(void);

/**
 * The carveout/cp heaps return physical addresses; since 0 may be a
 * valid physical address, this value is used to indicate allocation
 * failure.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL -1
#define ION_CP_ALLOCATE_FAIL -1
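
/*
 * Allocation sketch for a custom heap backed by the carveout allocator
 * (the heap itself is hypothetical; -ENOMEM assumes <linux/errno.h> is
 * available transitively, as is usual in kernel code):
 */
static inline int ion_example_carveout_allocate(struct ion_heap *heap,
						struct ion_buffer *buffer,
						unsigned long size,
						unsigned long align)
{
	buffer->priv_phys = ion_carveout_allocate(heap, size, align);
	if (buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL)
		return -ENOMEM;
	return 0;
}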

/**
 * The reserved heap returns physical addresses; since 0 may be a valid
 * physical address, this value is used to indicate allocation failure.
 */
#define ION_RESERVED_ALLOCATE_FAIL -1

/**
 * ion_map_fmem_buffer - map fmem allocated memory into the kernel
 * @buffer - buffer to map
 * @phys_base - physical base of the heap
 * @virt_base - virtual base of the heap
 * @flags - flags for the heap
 *
 * Map fmem allocated memory into the kernel address space. This
 * is designed to be used by other heaps that need fmem behavior.
 * The virtual range must be pre-allocated.
 */
void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
			  void *virt_base, unsigned long flags);
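
/*
 * Sketch of a heap delegating its map_kernel op to ion_map_fmem_buffer()
 * (the containing heap type and its base fields are hypothetical):
 */
struct ion_example_fmem_heap {
	struct ion_heap heap;
	unsigned long phys_base;	/* physical base of the reserved range */
	void *virt_base;		/* pre-allocated virtual base */
};

static inline void *ion_example_fmem_map_kernel(struct ion_heap *heap,
						struct ion_buffer *buffer,
						unsigned long flags)
{
	struct ion_example_fmem_heap *fheap =
		container_of(heap, struct ion_example_fmem_heap, heap);

	return ion_map_fmem_buffer(buffer, fheap->phys_base,
				   fheap->virt_base, flags);
}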

#endif /* _ION_PRIV_H */