blob: 95c4e6a38544ce0b6b19aacfbba08e7483906a8d [file] [log] [blame]
Laura Abbott6438e532012-07-20 10:10:41 -07001/*
Laura Abbott6438e532012-07-20 10:10:41 -07002 *
Laura Abbott7db4e0b2013-01-03 14:20:16 -08003 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Laura Abbott6438e532012-07-20 10:10:41 -07004 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#ifndef _LINUX_MSM_ION_H
17#define _LINUX_MSM_ION_H
18
19#include <linux/ion.h>
20
/*
 * MSM-specific Ion heap types, numbered after the last generic type
 * (ION_HEAP_TYPE_CUSTOM) so they never collide with the core Ion enum.
 */
enum msm_ion_heap_types {
	ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
	ION_HEAP_TYPE_IOMMU = ION_HEAP_TYPE_MSM_START,
	ION_HEAP_TYPE_CP,		/* content-protection heap */
	ION_HEAP_TYPE_SECURE_DMA,
};
27
/**
 * These are the only ids that should be used for Ion heap ids.
 * The ids listed are the order in which allocation will be attempted
 * if specified. Don't swap the order of heap ids unless you know what
 * you are doing!
 * Id's are spaced by purpose to allow new Id's to be inserted in-between (for
 * possible fallbacks).
 * Each id is a bit position: use the ION_HEAP() macro below to turn an id
 * into a heap mask. Bit 31 is reserved for the ION_SECURE flag and must
 * never be used as a heap id.
 */

enum ion_heap_ids {
	INVALID_HEAP_ID = -1,
	ION_CP_MM_HEAP_ID = 8,
	ION_CP_MFC_HEAP_ID = 12,
	ION_CP_WB_HEAP_ID = 16, /* 8660 only */
	ION_CAMERA_HEAP_ID = 20, /* 8660 only */
	ION_SYSTEM_CONTIG_HEAP_ID = 21,
	ION_ADSP_HEAP_ID = 22,
	ION_PIL1_HEAP_ID = 23, /* Currently used for other PIL images */
	ION_SF_HEAP_ID = 24,
	ION_IOMMU_HEAP_ID = 25,
	ION_PIL2_HEAP_ID = 26, /* Currently used for modem firmware images */
	ION_QSECOM_HEAP_ID = 27,
	ION_AUDIO_HEAP_ID = 28,

	ION_MM_FIRMWARE_HEAP_ID = 29,
	ION_SYSTEM_HEAP_ID = 30,

	ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_SECURE flag */
};
57
/*
 * Position of a heap within the fixed allocation area; used by the
 * fixed_position fields of the heap platform-data structures below
 * (zero / NOT_FIXED means the heap is not in the fixed area).
 */
enum ion_fixed_position {
	NOT_FIXED,
	FIXED_LOW,
	FIXED_MIDDLE,
	FIXED_HIGH,
};
64
/*
 * Usage hint passed to TZ when securing/unsecuring content-protection
 * memory via the 2.0 APIs (see msm_ion_secure_heap_2_0() and friends).
 */
enum cp_mem_usage {
	VIDEO_BITSTREAM = 0x1,
	VIDEO_PIXEL = 0x2,
	VIDEO_NONPIXEL = 0x3,
	MAX_USAGE = 0x4,		/* sentinel: first invalid value */
	UNKNOWN = 0x7FFFFFFF,		/* also forces the enum to 32 bits */
};
72
73#define ION_HEAP_CP_MASK (1 << ION_HEAP_TYPE_CP)
74
75/**
76 * Flag to use when allocating to indicate that a heap is secure.
77 */
78#define ION_SECURE (1 << ION_HEAP_ID_RESERVED)
79
80/**
 * Flag for clients to force contiguous memory allocation
82 *
83 * Use of this flag is carefully monitored!
84 */
85#define ION_FORCE_CONTIGUOUS (1 << 30)
86
87/**
Mitchel Humpherys362b52b2012-09-13 10:53:22 -070088 * Macro should be used with ion_heap_ids defined above.
89 */
90#define ION_HEAP(bit) (1 << (bit))
91
Laura Abbott98e8ddc2013-02-09 09:35:30 -080092#define ION_ADSP_HEAP_NAME "adsp"
Mitchel Humpherys362b52b2012-09-13 10:53:22 -070093#define ION_VMALLOC_HEAP_NAME "vmalloc"
Mitchel Humpherysf9210422013-03-19 17:16:58 -070094#define ION_KMALLOC_HEAP_NAME "kmalloc"
Mitchel Humpherys362b52b2012-09-13 10:53:22 -070095#define ION_AUDIO_HEAP_NAME "audio"
96#define ION_SF_HEAP_NAME "sf"
97#define ION_MM_HEAP_NAME "mm"
98#define ION_CAMERA_HEAP_NAME "camera_preview"
99#define ION_IOMMU_HEAP_NAME "iommu"
100#define ION_MFC_HEAP_NAME "mfc"
101#define ION_WB_HEAP_NAME "wb"
102#define ION_MM_FIRMWARE_HEAP_NAME "mm_fw"
Neeti Desai9dc9db42012-10-18 17:53:51 -0700103#define ION_PIL1_HEAP_NAME "pil_1"
104#define ION_PIL2_HEAP_NAME "pil_2"
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700105#define ION_QSECOM_HEAP_NAME "qsecom"
106#define ION_FMEM_HEAP_NAME "fmem"
107
/*
 * Helpers for manipulating the ION_FLAG_CACHED bit in allocation flags.
 * Arguments are fully parenthesized so callers may pass arbitrary
 * expressions without operator-precedence surprises (the previous
 * definitions expanded __cache unparenthesized).
 */
#define ION_SET_CACHED(__cache)		((__cache) | ION_FLAG_CACHED)
#define ION_SET_UNCACHED(__cache)	((__cache) & ~ION_FLAG_CACHED)

#define ION_IS_CACHED(__flags)		((__flags) & ION_FLAG_CACHED)
112
113#ifdef __KERNEL__
114
115/*
116 * This flag allows clients when mapping into the IOMMU to specify to
117 * defer un-mapping from the IOMMU until the buffer memory is freed.
118 */
119#define ION_IOMMU_UNMAP_DELAYED 1
120
Laura Abbott93619302012-10-11 11:51:40 -0700121/*
122 * This flag allows clients to defer unsecuring a buffer until the buffer
123 * is actually freed.
124 */
125#define ION_UNSECURE_DELAYED 1
126
/**
 * struct ion_cp_heap_pdata - defines a content protection heap in the given
 * platform
 * @permission_type:	Memory ID used to identify the memory to TZ
 * @align:		Alignment requirement for the memory
 * @secure_base:	Base address for securing the heap.
 *			Note: This might be different from actual base address
 *			of this heap in the case of a shared heap.
 * @secure_size:	Memory size for securing the heap.
 *			Note: This might be different from actual size
 *			of this heap in the case of a shared heap.
 * @reusable		Flag indicating whether this heap is reusable or not.
 *			(see FMEM)
 * @mem_is_fmem		Flag indicating whether this memory is coming from fmem
 *			or not.
 * @is_cma		Flag indicating whether this heap is backed by CMA
 *			(contiguous memory allocator).
 * @fixed_position	If nonzero, position in the fixed area.
 * @virt_addr:		Virtual address used when using fmem.
 * @iommu_map_all:	Indicates whether we should map whole heap into IOMMU.
 * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
 * @request_region:	function to be called when the number of allocations
 *			goes from 0 -> 1
 * @release_region:	function to be called when the number of allocations
 *			goes from 1 -> 0
 * @setup_region:	function to be called upon ion registration
 * @memory_type:	Memory type used for the heap
 * @allow_nonsecure_alloc: allow non-secure allocations from this heap. For
 *			secure heaps, this flag must be set to allow non-secure
 *			allocations. For non-secure heaps, this flag is ignored.
 *
 */
struct ion_cp_heap_pdata {
	enum ion_permission_type permission_type;
	unsigned int align;
	ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
	size_t secure_size; /* Size used for securing heap when heap is shared*/
	int reusable;
	int mem_is_fmem;
	int is_cma;
	enum ion_fixed_position fixed_position;
	int iommu_map_all;
	int iommu_2x_map_domain;
	void *virt_addr;
	int (*request_region)(void *);
	int (*release_region)(void *);
	void *(*setup_region)(void);
	enum ion_memory_types memory_type;
	int allow_nonsecure_alloc;
};
175
/**
 * struct ion_co_heap_pdata - defines a carveout heap in the given platform
 * @adjacent_mem_id:	Id of heap that this heap must be adjacent to.
 * @align:		Alignment requirement for the memory
 * @mem_is_fmem		Flag indicating whether this memory is coming from fmem
 *			or not.
 * @fixed_position	If nonzero, position in the fixed area.
 * @request_region:	function to be called when the number of allocations
 *			goes from 0 -> 1
 * @release_region:	function to be called when the number of allocations
 *			goes from 1 -> 0
 * @setup_region:	function to be called upon ion registration
 * @memory_type:	Memory type used for the heap
 *
 */
struct ion_co_heap_pdata {
	int adjacent_mem_id;
	unsigned int align;
	int mem_is_fmem;
	enum ion_fixed_position fixed_position;
	int (*request_region)(void *);
	int (*release_region)(void *);
	void *(*setup_region)(void);
	enum ion_memory_types memory_type;
};
201
202#ifdef CONFIG_ION
203/**
204 * msm_ion_secure_heap - secure a heap. Wrapper around ion_secure_heap.
205 *
206 * @heap_id - heap id to secure.
207 *
208 * Secure a heap
209 * Returns 0 on success
210 */
211int msm_ion_secure_heap(int heap_id);
212
213/**
214 * msm_ion_unsecure_heap - unsecure a heap. Wrapper around ion_unsecure_heap.
215 *
 * @heap_id - heap id to unsecure.
217 *
218 * Un-secure a heap
219 * Returns 0 on success
220 */
221int msm_ion_unsecure_heap(int heap_id);
222
223/**
224 * msm_ion_secure_heap_2_0 - secure a heap using 2.0 APIs
225 * Wrapper around ion_secure_heap.
226 *
227 * @heap_id - heap id to secure.
228 * @usage - usage hint to TZ
229 *
230 * Secure a heap
231 * Returns 0 on success
232 */
233int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage);
234
235/**
 * msm_ion_unsecure_heap_2_0 - unsecure a heap secured with 2.0 APIs.
237 * Wrapper around ion_unsecure_heap.
238 *
 * @heap_id - heap id to unsecure.
240 * @usage - usage hint to TZ
241 *
242 * Un-secure a heap
243 * Returns 0 on success
244 */
245int msm_ion_unsecure_heap_2_0(int heap_id, enum cp_mem_usage usage);
Laura Abbott93619302012-10-11 11:51:40 -0700246
247/**
248 * msm_ion_secure_buffer - secure an individual buffer
249 *
250 * @client - client who has access to the buffer
251 * @handle - buffer to secure
252 * @usage - usage hint to TZ
253 * @flags - flags for the securing
254 */
255int msm_ion_secure_buffer(struct ion_client *client, struct ion_handle *handle,
256 enum cp_mem_usage usage, int flags);
257
258/**
259 * msm_ion_unsecure_buffer - unsecure an individual buffer
260 *
261 * @client - client who has access to the buffer
262 * @handle - buffer to secure
263 */
264int msm_ion_unsecure_buffer(struct ion_client *client,
265 struct ion_handle *handle);
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700266#else
/* CONFIG_ION disabled: heap securing is unavailable. */
static inline int msm_ion_secure_heap(int heap_id)
{
	return -ENODEV;
}
272
/* CONFIG_ION disabled: heap unsecuring is unavailable. */
static inline int msm_ion_unsecure_heap(int heap_id)
{
	return -ENODEV;
}
277
/* CONFIG_ION disabled: 2.0-API heap securing is unavailable. */
static inline int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage)
{
	return -ENODEV;
}
282
/* CONFIG_ION disabled: 2.0-API heap unsecuring is unavailable. */
static inline int msm_ion_unsecure_heap_2_0(int heap_id,
					enum cp_mem_usage usage)
{
	return -ENODEV;
}
Mitchel Humpherys782653e2013-02-25 18:54:53 -0800288
289static inline int msm_ion_secure_buffer(struct ion_client *client,
290 struct ion_handle *handle,
291 enum cp_mem_usage usage,
292 int flags)
293{
294 return -ENODEV;
295}
296
/* CONFIG_ION disabled: per-buffer unsecuring is unavailable. */
static inline int msm_ion_unsecure_buffer(struct ion_client *client,
					struct ion_handle *handle)
{
	return -ENODEV;
}
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700302#endif /* CONFIG_ION */
303
#endif /* __KERNEL__ */
305
/* struct ion_flush_data - data passed to ion for flushing caches
 *
 * @handle:	handle with data to flush
 * @fd:		fd to flush
 * @vaddr:	userspace virtual address mapped with mmap
 * @offset:	offset into the handle to flush
 * @length:	length of handle to flush
 *
 * Performs cache operations on the handle. If p is the start address
 * of the handle, p + offset through p + offset + length will have
 * the cache operations performed.
 * Used as the argument of the ION_IOC_*_CACHES ioctls defined below.
 */
struct ion_flush_data {
	struct ion_handle *handle;
	int fd;
	void *vaddr;
	unsigned int offset;
	unsigned int length;
};
325
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700326#define ION_IOC_MSM_MAGIC 'M'
327
328/**
329 * DOC: ION_IOC_CLEAN_CACHES - clean the caches
330 *
331 * Clean the caches of the handle specified.
332 */
333#define ION_IOC_CLEAN_CACHES _IOWR(ION_IOC_MSM_MAGIC, 0, \
334 struct ion_flush_data)
335/**
336 * DOC: ION_IOC_INV_CACHES - invalidate the caches
337 *
338 * Invalidate the caches of the handle specified.
339 */
340#define ION_IOC_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 1, \
341 struct ion_flush_data)
342/**
343 * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches
344 *
345 * Clean and invalidate the caches of the handle specified.
346 */
347#define ION_IOC_CLEAN_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 2, \
348 struct ion_flush_data)
349
Laura Abbott6438e532012-07-20 10:10:41 -0700350#endif