/*
 *
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#ifndef _LINUX_MSM_ION_H
#define _LINUX_MSM_ION_H

#include <linux/ion.h>

enum msm_ion_heap_types {
	ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
	ION_HEAP_TYPE_IOMMU = ION_HEAP_TYPE_MSM_START,
	ION_HEAP_TYPE_CP,
};

/**
 * These are the only ids that should be used for Ion heap ids.
 * The ids listed are in the order in which allocation will be attempted
 * if specified. Don't swap the order of heap ids unless you know what
 * you are doing!
 * Ids are spaced out on purpose so that new ids can be inserted in between
 * (for possible fallbacks).
 */

enum ion_heap_ids {
	INVALID_HEAP_ID = -1,
	ION_CP_MM_HEAP_ID = 8,
	ION_CP_MFC_HEAP_ID = 12,
	ION_CP_WB_HEAP_ID = 16, /* 8660 only */
	ION_CAMERA_HEAP_ID = 20, /* 8660 only */
	ION_PIL1_HEAP_ID = 23, /* Currently used for other PIL images */
	ION_SF_HEAP_ID = 24,
	ION_IOMMU_HEAP_ID = 25,
	ION_PIL2_HEAP_ID = 26, /* Currently used for modem firmware images */
	ION_QSECOM_HEAP_ID = 27,
	ION_AUDIO_HEAP_ID = 28,

	ION_MM_FIRMWARE_HEAP_ID = 29,
	ION_SYSTEM_HEAP_ID = 30,

	ION_HEAP_ID_RESERVED = 31 /* Bit reserved for the ION_SECURE flag */
};

enum ion_fixed_position {
	NOT_FIXED,
	FIXED_LOW,
	FIXED_MIDDLE,
	FIXED_HIGH,
};

enum cp_mem_usage {
	VIDEO_BITSTREAM = 0x1,
	VIDEO_PIXEL = 0x2,
	VIDEO_NONPIXEL = 0x3,
	MAX_USAGE = 0x4,
	UNKNOWN = 0x7FFFFFFF,
};

#define ION_HEAP_CP_MASK	(1 << ION_HEAP_TYPE_CP)

/**
 * Flag to use when allocating to indicate that a heap is secure.
 */
#define ION_SECURE (1 << ION_HEAP_ID_RESERVED)

/**
 * Flag for clients to force contiguous memory allocation.
 *
 * Use of this flag is carefully monitored!
 */
#define ION_FORCE_CONTIGUOUS (1 << 30)

/**
 * This macro should be used with the ion_heap_ids defined above to build a
 * heap mask.
 */
#define ION_HEAP(bit) (1 << (bit))
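
/*
 * Example (illustrative sketch, not part of the API): composing a heap mask
 * from the heap ids above for an allocation. The ion_alloc() signature is
 * assumed from the linux/ion.h that accompanies this header; OR-ing two
 * ION_HEAP() bits expresses a fallback from the MM heap to the system heap.
 *
 *	unsigned int heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID) |
 *				 ION_HEAP(ION_SYSTEM_HEAP_ID);
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_1M, SZ_4K, heap_mask, 0);
 *	if (IS_ERR_OR_NULL(handle))
 *		return PTR_ERR(handle);
 */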

#define ION_VMALLOC_HEAP_NAME	"vmalloc"
#define ION_AUDIO_HEAP_NAME	"audio"
#define ION_SF_HEAP_NAME	"sf"
#define ION_MM_HEAP_NAME	"mm"
#define ION_CAMERA_HEAP_NAME	"camera_preview"
#define ION_IOMMU_HEAP_NAME	"iommu"
#define ION_MFC_HEAP_NAME	"mfc"
#define ION_WB_HEAP_NAME	"wb"
#define ION_MM_FIRMWARE_HEAP_NAME	"mm_fw"
#define ION_PIL1_HEAP_NAME	"pil_1"
#define ION_PIL2_HEAP_NAME	"pil_2"
#define ION_QSECOM_HEAP_NAME	"qsecom"
#define ION_FMEM_HEAP_NAME	"fmem"

#define ION_SET_CACHED(__cache)		(__cache | ION_FLAG_CACHED)
#define ION_SET_UNCACHED(__cache)	(__cache & ~ION_FLAG_CACHED)

#define ION_IS_CACHED(__flags)	((__flags) & ION_FLAG_CACHED)
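
/*
 * Example (illustrative sketch): manipulating allocation flags with the cache
 * helpers above. ION_SET_CACHED()/ION_SET_UNCACHED() set or clear the
 * ION_FLAG_CACHED bit in an existing flags word and ION_IS_CACHED() tests it.
 *
 *	unsigned int flags = 0;
 *
 *	flags = ION_SET_CACHED(flags);
 *	if (ION_IS_CACHED(flags))
 *		flags = ION_SET_UNCACHED(flags);
 */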

#ifdef __KERNEL__

/*
 * This flag allows clients, when mapping into the IOMMU, to defer unmapping
 * from the IOMMU until the buffer memory is freed.
 */
#define ION_IOMMU_UNMAP_DELAYED 1

/*
 * This flag allows clients to defer unsecuring a buffer until the buffer
 * is actually freed.
 */
#define ION_UNSECURE_DELAYED 1

/**
 * struct ion_cp_heap_pdata - defines a content protection heap in the given
 * platform
 * @permission_type: Memory ID used to identify the memory to TZ
 * @align: Alignment requirement for the memory
 * @secure_base: Base address for securing the heap.
 *               Note: This might be different from the actual base address
 *               of this heap in the case of a shared heap.
 * @secure_size: Memory size for securing the heap.
 *               Note: This might be different from the actual size
 *               of this heap in the case of a shared heap.
 * @reusable: Flag indicating whether this heap is reusable or not.
 *            (see FMEM)
 * @mem_is_fmem: Flag indicating whether this memory is coming from fmem
 *               or not.
 * @is_cma: Flag indicating whether this heap is backed by CMA.
 * @fixed_position: If nonzero, position in the fixed area.
 * @virt_addr: Virtual address used when using fmem.
 * @iommu_map_all: Indicates whether the whole heap should be mapped into the
 *                 IOMMU.
 * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
 * @request_region: Function to be called when the number of allocations
 *                  goes from 0 -> 1.
 * @release_region: Function to be called when the number of allocations
 *                  goes from 1 -> 0.
 * @setup_region: Function to be called upon ion registration.
 * @memory_type: Memory type used for the heap.
 * @allow_nonsecure_alloc: Allow non-secure allocations from this heap. For
 *                         secure heaps, this flag must be set to allow
 *                         non-secure allocations. For non-secure heaps, this
 *                         flag is ignored.
 *
 */
struct ion_cp_heap_pdata {
	enum ion_permission_type permission_type;
	unsigned int align;
	ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
	size_t secure_size; /* Size used for securing heap when heap is shared */
	int reusable;
	int mem_is_fmem;
	int is_cma;
	enum ion_fixed_position fixed_position;
	int iommu_map_all;
	int iommu_2x_map_domain;
	ion_virt_addr_t *virt_addr;
	int (*request_region)(void *);
	int (*release_region)(void *);
	void *(*setup_region)(void);
	enum ion_memory_types memory_type;
	int allow_nonsecure_alloc;
};
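
/*
 * Example (illustrative sketch): a board file might describe a content
 * protection heap with platform data like the following. The enumerator
 * values and the request/release helpers are assumptions for illustration;
 * only the structure layout comes from this header.
 *
 *	static struct ion_cp_heap_pdata cp_mm_ion_pdata = {
 *		.permission_type = IPT_TYPE_MM_CARVEOUT,
 *		.align = SZ_64K,
 *		.reusable = 0,
 *		.mem_is_fmem = 0,
 *		.fixed_position = FIXED_MIDDLE,
 *		.iommu_map_all = 1,
 *		.request_region = mm_request_region,
 *		.release_region = mm_release_region,
 *		.memory_type = ION_EBI_TYPE,
 *	};
 */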

/**
 * struct ion_co_heap_pdata - defines a carveout heap in the given platform
 * @adjacent_mem_id: Id of the heap that this heap must be adjacent to.
 * @align: Alignment requirement for the memory
 * @mem_is_fmem: Flag indicating whether this memory is coming from fmem
 *               or not.
 * @fixed_position: If nonzero, position in the fixed area.
 * @request_region: Function to be called when the number of allocations
 *                  goes from 0 -> 1.
 * @release_region: Function to be called when the number of allocations
 *                  goes from 1 -> 0.
 * @setup_region: Function to be called upon ion registration.
 * @memory_type: Memory type used for the heap.
 *
 */
struct ion_co_heap_pdata {
	int adjacent_mem_id;
	unsigned int align;
	int mem_is_fmem;
	enum ion_fixed_position fixed_position;
	int (*request_region)(void *);
	int (*release_region)(void *);
	void *(*setup_region)(void);
	enum ion_memory_types memory_type;
};
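
/*
 * Example (illustrative sketch): a plain carveout heap, e.g. for audio, could
 * be described as below. The values are assumptions for illustration; only
 * the structure layout comes from this header.
 *
 *	static struct ion_co_heap_pdata co_audio_ion_pdata = {
 *		.adjacent_mem_id = INVALID_HEAP_ID,
 *		.align = PAGE_SIZE,
 *		.mem_is_fmem = 0,
 *		.fixed_position = NOT_FIXED,
 *		.memory_type = ION_EBI_TYPE,
 *	};
 */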

#ifdef CONFIG_ION
/**
 * msm_ion_secure_heap - secure a heap. Wrapper around ion_secure_heap.
 *
 * @heap_id - heap id to secure.
 *
 * Secure a heap.
 * Returns 0 on success.
 */
int msm_ion_secure_heap(int heap_id);

/**
 * msm_ion_unsecure_heap - unsecure a heap. Wrapper around ion_unsecure_heap.
 *
 * @heap_id - heap id to unsecure.
 *
 * Unsecure a heap.
 * Returns 0 on success.
 */
int msm_ion_unsecure_heap(int heap_id);

/**
 * msm_ion_secure_heap_2_0 - secure a heap using 2.0 APIs
 * Wrapper around ion_secure_heap.
 *
 * @heap_id - heap id to secure.
 * @usage - usage hint to TZ
 *
 * Secure a heap.
 * Returns 0 on success.
 */
int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage);

/**
 * msm_ion_unsecure_heap_2_0 - unsecure a heap secured with the 2.0 APIs.
 * Wrapper around ion_unsecure_heap.
 *
 * @heap_id - heap id to unsecure.
 * @usage - usage hint to TZ
 *
 * Unsecure a heap.
 * Returns 0 on success.
 */
int msm_ion_unsecure_heap_2_0(int heap_id, enum cp_mem_usage usage);
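
/*
 * Example (illustrative sketch): securing the multimedia heap for protected
 * pixel data and releasing it again, using the declarations above.
 *
 *	int ret = msm_ion_secure_heap_2_0(ION_CP_MM_HEAP_ID, VIDEO_PIXEL);
 *	if (ret)
 *		return ret;
 *	... use the heap for protected content ...
 *	ret = msm_ion_unsecure_heap_2_0(ION_CP_MM_HEAP_ID, VIDEO_PIXEL);
 */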

/**
 * msm_ion_secure_buffer - secure an individual buffer
 *
 * @client - client who has access to the buffer
 * @handle - buffer to secure
 * @usage - usage hint to TZ
 * @flags - flags for the securing
 */
int msm_ion_secure_buffer(struct ion_client *client, struct ion_handle *handle,
			enum cp_mem_usage usage, int flags);
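
/*
 * Example (illustrative sketch): securing a single buffer and deferring the
 * unsecure until the buffer is freed. Pairing ION_UNSECURE_DELAYED with the
 * flags argument is an assumption here; client and handle are taken from the
 * usual ion allocation path.
 *
 *	ret = msm_ion_secure_buffer(client, handle, VIDEO_BITSTREAM,
 *				    ION_UNSECURE_DELAYED);
 *	if (ret)
 *		goto err_free_handle;
 */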

/**
 * msm_ion_unsecure_buffer - unsecure an individual buffer
 *
 * @client - client who has access to the buffer
 * @handle - buffer to unsecure
 */
int msm_ion_unsecure_buffer(struct ion_client *client,
			struct ion_handle *handle);
#else
static inline int msm_ion_secure_heap(int heap_id)
{
	return -ENODEV;
}

static inline int msm_ion_unsecure_heap(int heap_id)
{
	return -ENODEV;
}

static inline int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage)
{
	return -ENODEV;
}

static inline int msm_ion_unsecure_heap_2_0(int heap_id,
					enum cp_mem_usage usage)
{
	return -ENODEV;
}

static inline int msm_ion_secure_buffer(struct ion_client *client,
					struct ion_handle *handle,
					enum cp_mem_usage usage, int flags)
{
	return -ENODEV;
}

static inline int msm_ion_unsecure_buffer(struct ion_client *client,
					struct ion_handle *handle)
{
	return -ENODEV;
}
#endif /* CONFIG_ION */

#endif /* __KERNEL__ */

/* struct ion_flush_data - data passed to ion for flushing caches
 *
 * @handle: handle with data to flush
 * @fd: fd to flush
 * @vaddr: userspace virtual address mapped with mmap
 * @offset: offset into the handle to flush
 * @length: length of handle to flush
 *
 * Performs cache operations on the handle. If p is the start address
 * of the handle, p + offset through p + offset + length will have
 * the cache operations performed.
 */
struct ion_flush_data {
	struct ion_handle *handle;
	int fd;
	void *vaddr;
	unsigned int offset;
	unsigned int length;
};

/* struct ion_flag_data - information about flags for this buffer
 *
 * @handle: handle to get flags from
 * @flags: flags of this handle
 *
 * Takes the handle as an input and outputs the flags from the handle
 * in the flags field.
 */
struct ion_flag_data {
	struct ion_handle *handle;
	unsigned long flags;
};

#define ION_IOC_MSM_MAGIC 'M'

/**
 * DOC: ION_IOC_CLEAN_CACHES - clean the caches
 *
 * Clean the caches of the handle specified.
 */
#define ION_IOC_CLEAN_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 0, \
					struct ion_flush_data)
/**
 * DOC: ION_IOC_INV_CACHES - invalidate the caches
 *
 * Invalidate the caches of the handle specified.
 */
#define ION_IOC_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 1, \
					struct ion_flush_data)
/**
 * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches
 *
 * Clean and invalidate the caches of the handle specified.
 */
#define ION_IOC_CLEAN_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 2, \
					struct ion_flush_data)
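
/*
 * Example (illustrative sketch, userspace): cleaning the caches on a buffer
 * that was previously allocated with ION_IOC_ALLOC, exported with
 * ION_IOC_SHARE, and mapped with mmap(). Those earlier steps belong to the
 * generic ion UAPI and are assumed here; only ION_IOC_CLEAN_CACHES and
 * struct ion_flush_data come from this header.
 *
 *	struct ion_flush_data flush = {
 *		.handle = alloc_data.handle,
 *		.fd = buf_fd,
 *		.vaddr = mapped_ptr,
 *		.offset = 0,
 *		.length = buf_len,
 *	};
 *
 *	if (ioctl(ion_fd, ION_IOC_CLEAN_CACHES, &flush) < 0)
 *		perror("ION_IOC_CLEAN_CACHES");
 */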

/**
 * DOC: ION_IOC_GET_FLAGS - get the flags of the handle
 *
 * Gets the flags of the current handle, which indicate cacheability,
 * secure state, etc.
 */
#define ION_IOC_GET_FLAGS	_IOWR(ION_IOC_MSM_MAGIC, 3, \
					struct ion_flag_data)
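
/*
 * Example (illustrative sketch, userspace): querying the flags of a handle to
 * decide whether cache maintenance is needed before handing the buffer to a
 * device. alloc_data.handle and ion_fd are assumed from the generic ion UAPI.
 *
 *	struct ion_flag_data flag_data = { .handle = alloc_data.handle };
 *
 *	if (ioctl(ion_fd, ION_IOC_GET_FLAGS, &flag_data) == 0 &&
 *	    ION_IS_CACHED(flag_data.flags))
 *		... perform cache maintenance before device access ...
 */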

#endif /* _LINUX_MSM_ION_H */