blob: 0127edc2d970bdc9a94656caa3f8a1e4dfae2ccb [file] [log] [blame]
Laura Abbott6438e532012-07-20 10:10:41 -07001#ifndef _LINUX_MSM_ION_H
2#define _LINUX_MSM_ION_H
3
4#include <linux/ion.h>
5
Mitchel Humpherys362b52b2012-09-13 10:53:22 -07006enum msm_ion_heap_types {
7 ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
Mitchel Humpherys355a5912013-10-04 14:01:55 -07008 ION_HEAP_TYPE_DMA = ION_HEAP_TYPE_MSM_START,
Mitchel Humpherys362b52b2012-09-13 10:53:22 -07009 ION_HEAP_TYPE_CP,
Laura Abbotta8c373f2013-02-15 09:25:35 -080010 ION_HEAP_TYPE_SECURE_DMA,
Laura Abbottf8a269c2013-04-01 16:26:00 -070011 ION_HEAP_TYPE_REMOVED,
Mitchel Humpherys5348d142013-06-14 17:37:23 -070012 /*
13 * if you add a heap type here you should also add it to
14 * heap_types_info[] in msm_ion.c
15 */
Mitchel Humpherys362b52b2012-09-13 10:53:22 -070016};
17
/**
 * enum ion_heap_ids - the only ids that should be used for Ion heaps
 *
 * The ids listed are the order in which allocation will be attempted
 * if specified.  Do not swap the order of heap ids unless you know
 * what you are doing!  Ids are spaced out by purpose to allow new ids
 * to be inserted in between (for possible fallbacks).
 */
enum ion_heap_ids {
	INVALID_HEAP_ID = -1,
	ION_CP_MM_HEAP_ID = 8,
	ION_CP_MFC_HEAP_ID = 12,
	ION_CP_WB_HEAP_ID = 16,		/* 8660 only */
	ION_CAMERA_HEAP_ID = 20,	/* 8660 only */
	ION_SYSTEM_CONTIG_HEAP_ID = 21,
	ION_ADSP_HEAP_ID = 22,
	ION_PIL1_HEAP_ID = 23,	/* Currently used for other PIL images */
	ION_SF_HEAP_ID = 24,
	ION_SYSTEM_HEAP_ID = 25,
	ION_PIL2_HEAP_ID = 26,	/* Currently used for modem firmware images */
	ION_QSECOM_HEAP_ID = 27,
	ION_AUDIO_HEAP_ID = 28,

	ION_MM_FIRMWARE_HEAP_ID = 29,

	/* Bit 31 is reserved for the ION_FLAG_SECURE flag */
	ION_HEAP_ID_RESERVED = 31
};
46
/*
 * The IOMMU heap is deprecated!  These aliases exist only for
 * backwards compatibility with older callers.
 */
#define ION_IOMMU_HEAP_ID	ION_SYSTEM_HEAP_ID
#define ION_HEAP_TYPE_IOMMU	ION_HEAP_TYPE_SYSTEM
53
/* Position of a heap within the fixed area, if any. */
enum ion_fixed_position {
	NOT_FIXED,
	FIXED_LOW,
	FIXED_MIDDLE,
	FIXED_HIGH,
};
60
/*
 * Usage hints passed down when securing content-protected memory.
 * UNKNOWN = 0x7FFFFFFF presumably forces a 32-bit underlying type --
 * TODO(review): confirm against the TZ interface.
 */
enum cp_mem_usage {
	VIDEO_BITSTREAM = 0x1,
	VIDEO_PIXEL = 0x2,
	VIDEO_NONPIXEL = 0x3,
	MAX_USAGE = 0x4,
	UNKNOWN = 0x7FFFFFFF,
};
68
#define ION_HEAP_CP_MASK	(1 << ION_HEAP_TYPE_CP)
#define ION_HEAP_TYPE_DMA_MASK	(1 << ION_HEAP_TYPE_DMA)

/*
 * Allocation flag indicating that a heap is secure.
 * NOTE(review): 1 << 31 overflows a signed int (bit 31 is the sign
 * bit); kept as-is to match the existing kernel convention and ABI.
 */
#define ION_FLAG_SECURE		(1 << ION_HEAP_ID_RESERVED)

/*
 * Flag for clients to force contiguous memory allocation.
 *
 * Use of this flag is carefully monitored!
 */
#define ION_FLAG_FORCE_CONTIGUOUS	(1 << 30)

/*
 * Used in conjunction with heaps which pool memory to force an
 * allocation to come from the page allocator directly instead of
 * from the pool.
 */
#define ION_FLAG_POOL_FORCE_ALLOC	(1 << 16)

/* Deprecated!  Please use the corresponding ION_FLAG_* instead. */
#define ION_SECURE		ION_FLAG_SECURE
#define ION_FORCE_CONTIGUOUS	ION_FLAG_FORCE_CONTIGUOUS

/* Build a heap mask from one of the ion_heap_ids defined above. */
#define ION_HEAP(bit)		(1 << (bit))
100
/* Canonical heap names, matched against the platform heap tables. */
#define ION_ADSP_HEAP_NAME		"adsp"
#define ION_SYSTEM_HEAP_NAME		"system"
#define ION_VMALLOC_HEAP_NAME		ION_SYSTEM_HEAP_NAME
#define ION_KMALLOC_HEAP_NAME		"kmalloc"
#define ION_AUDIO_HEAP_NAME		"audio"
#define ION_SF_HEAP_NAME		"sf"
#define ION_MM_HEAP_NAME		"mm"
#define ION_CAMERA_HEAP_NAME		"camera_preview"
#define ION_IOMMU_HEAP_NAME		"iommu"
#define ION_MFC_HEAP_NAME		"mfc"
#define ION_WB_HEAP_NAME		"wb"
#define ION_MM_FIRMWARE_HEAP_NAME	"mm_fw"
#define ION_PIL1_HEAP_NAME		"pil_1"
#define ION_PIL2_HEAP_NAME		"pil_2"
#define ION_QSECOM_HEAP_NAME		"qsecom"
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700116
/*
 * Helpers to set/clear/test the ION_FLAG_CACHED bit in a flags word.
 * Arguments are fully parenthesized so callers may pass compound
 * expressions (the previous ION_SET_CACHED/ION_SET_UNCACHED expanded
 * the argument bare, which mis-binds against the |, & and ~ operators).
 */
#define ION_SET_CACHED(__cache)		((__cache) | ION_FLAG_CACHED)
#define ION_SET_UNCACHED(__cache)	((__cache) & ~ION_FLAG_CACHED)

#define ION_IS_CACHED(__flags)		((__flags) & ION_FLAG_CACHED)
121
122#ifdef __KERNEL__
123
/*
 * This flag allows clients, when mapping into the IOMMU, to specify
 * that un-mapping from the IOMMU is deferred until the buffer memory
 * is freed.
 */
#define ION_IOMMU_UNMAP_DELAYED 1

/*
 * This flag allows clients to defer unsecuring a buffer until the
 * buffer is actually freed.
 */
#define ION_UNSECURE_DELAYED	1
135
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700136/**
137 * struct ion_cp_heap_pdata - defines a content protection heap in the given
138 * platform
139 * @permission_type: Memory ID used to identify the memory to TZ
140 * @align: Alignment requirement for the memory
141 * @secure_base: Base address for securing the heap.
142 * Note: This might be different from actual base address
143 * of this heap in the case of a shared heap.
144 * @secure_size: Memory size for securing the heap.
145 * Note: This might be different from actual size
146 * of this heap in the case of a shared heap.
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700147 * @fixed_position If nonzero, position in the fixed area.
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700148 * @iommu_map_all: Indicates whether we should map whole heap into IOMMU.
149 * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
150 * @request_region: function to be called when the number of allocations
151 * goes from 0 -> 1
152 * @release_region: function to be called when the number of allocations
153 * goes from 1 -> 0
154 * @setup_region: function to be called upon ion registration
155 * @memory_type:Memory type used for the heap
Mitchel Humpherys345f0232013-01-11 10:55:25 -0800156 * @allow_nonsecure_alloc: allow non-secure allocations from this heap. For
157 * secure heaps, this flag must be set so allow non-secure
158 * allocations. For non-secure heaps, this flag is ignored.
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700159 *
160 */
161struct ion_cp_heap_pdata {
162 enum ion_permission_type permission_type;
163 unsigned int align;
164 ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
165 size_t secure_size; /* Size used for securing heap when heap is shared*/
Laura Abbott3180a5f2012-08-03 17:31:03 -0700166 int is_cma;
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700167 enum ion_fixed_position fixed_position;
168 int iommu_map_all;
169 int iommu_2x_map_domain;
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700170 int (*request_region)(void *);
171 int (*release_region)(void *);
172 void *(*setup_region)(void);
173 enum ion_memory_types memory_type;
Mitchel Humpherys345f0232013-01-11 10:55:25 -0800174 int allow_nonsecure_alloc;
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700175};
176
177/**
178 * struct ion_co_heap_pdata - defines a carveout heap in the given platform
179 * @adjacent_mem_id: Id of heap that this heap must be adjacent to.
180 * @align: Alignment requirement for the memory
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700181 * @fixed_position If nonzero, position in the fixed area.
182 * @request_region: function to be called when the number of allocations
183 * goes from 0 -> 1
184 * @release_region: function to be called when the number of allocations
185 * goes from 1 -> 0
186 * @setup_region: function to be called upon ion registration
187 * @memory_type:Memory type used for the heap
188 *
189 */
190struct ion_co_heap_pdata {
191 int adjacent_mem_id;
192 unsigned int align;
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700193 enum ion_fixed_position fixed_position;
194 int (*request_region)(void *);
195 int (*release_region)(void *);
196 void *(*setup_region)(void);
197 enum ion_memory_types memory_type;
198};
199
/**
 * struct ion_cma_pdata - extra platform data for CMA regions
 * @default_prefetch_size:	default size to use for prefetching
 */
struct ion_cma_pdata {
	unsigned long default_prefetch_size;
};
207
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700208#ifdef CONFIG_ION
/**
 * msm_ion_client_create - allocate a client using the ion_device specified in
 * drivers/gpu/ion/msm/msm_ion.c
 *
 * heap_mask and name are the same as ion_client_create; return values
 * are the same as ion_client_create.
 */
struct ion_client *msm_ion_client_create(unsigned int heap_mask,
					const char *name);

/**
 * ion_handle_get_flags - get the flags for a given handle
 * @client:	client who allocated the handle
 * @handle:	handle to get the flags for
 * @flags:	pointer to store the flags
 *
 * Gets the current flags for a handle.  These flags indicate various
 * options of the buffer (caching, security, etc.)
 */
int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
				unsigned long *flags);

/**
 * ion_map_iommu - map the given handle into an iommu
 * @client:	client who allocated the handle
 * @handle:	handle to map
 * @domain_num:	domain number to map to
 * @partition_num: partition number to allocate iova from
 * @align:	alignment for the iova
 * @iova_length: length of iova to map.  If the iova length is greater
 *		than the handle length, the remaining address space will
 *		be mapped to a dummy buffer.
 * @iova:	pointer to store the iova address
 * @buffer_size: pointer to store the size of the buffer
 * @flags:	flags for options to map
 * @iommu_flags: flags specific to the iommu
 *
 * Maps the handle into the iova space specified via domain number.  The
 * iova will be allocated from the partition specified via partition_num.
 * Returns 0 on success, negative value on error.
 */
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags);

/**
 * ion_handle_get_size - get the allocated size of a given handle
 * @client:	client who allocated the handle
 * @handle:	handle to get the size for
 * @size:	pointer to store the size
 *
 * Gives the allocated size of a handle.  Returns 0 on success, negative
 * value on error.
 *
 * NOTE: This is intended to be used only to get a size to pass to
 * ion_map_iommu().  You should *NOT* rely on this for any other usage.
 */
int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size);

/**
 * ion_unmap_iommu - unmap the handle from an iommu
 * @client:	client who allocated the handle
 * @handle:	handle to unmap
 * @domain_num:	domain to unmap from
 * @partition_num: partition to unmap from
 *
 * Decrement the reference count on the iommu mapping.  If the count is
 * 0, the mapping will be removed from the iommu.
 */
void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num);

/**
 * ion_secure_heap - secure a heap
 * @dev:	the ion device the heap belongs to
 * @heap_id:	heap id to secure
 * @version:	version of content protection
 * @data:	extra data needed for protection
 *
 * Secure a heap.  Returns 0 on success.
 */
int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
			void *data);

/**
 * ion_unsecure_heap - un-secure a heap
 * @dev:	the ion device the heap belongs to
 * @heap_id:	heap id to un-secure
 * @version:	version of content protection
 * @data:	extra data needed for protection
 *
 * Un-secure a heap.  Returns 0 on success.
 */
int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
			void *data);

/**
 * msm_ion_do_cache_op - do cache operations
 * @client:	pointer to ION client
 * @handle:	pointer to buffer handle
 * @vaddr:	virtual address to operate on
 * @len:	length of data to do cache operation on
 * @cmd:	cache operation to perform:
 *		ION_IOC_CLEAN_CACHES
 *		ION_IOC_INV_CACHES
 *		ION_IOC_CLEAN_INV_CACHES
 *
 * Returns 0 on success.
 */
int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *vaddr, unsigned long len, unsigned int cmd);

/**
 * msm_ion_secure_heap - secure a heap.  Wrapper around ion_secure_heap.
 * @heap_id:	heap id to secure
 *
 * Returns 0 on success.
 */
int msm_ion_secure_heap(int heap_id);

/**
 * msm_ion_unsecure_heap - unsecure a heap.  Wrapper around
 * ion_unsecure_heap.
 * @heap_id:	heap id to un-secure
 *
 * Returns 0 on success.
 */
int msm_ion_unsecure_heap(int heap_id);

/**
 * msm_ion_secure_heap_2_0 - secure a heap using 2.0 APIs.  Wrapper
 * around ion_secure_heap.
 * @heap_id:	heap id to secure
 * @usage:	usage hint to TZ
 *
 * Returns 0 on success.
 */
int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage);

/**
 * msm_ion_unsecure_heap_2_0 - unsecure a heap secured with the 2.0 APIs.
 * Wrapper around ion_unsecure_heap.
 * @heap_id:	heap id to un-secure
 * @usage:	usage hint to TZ
 *
 * Returns 0 on success.
 */
int msm_ion_unsecure_heap_2_0(int heap_id, enum cp_mem_usage usage);

/**
 * msm_ion_secure_buffer - secure an individual buffer
 * @client:	client who has access to the buffer
 * @handle:	buffer to secure
 * @usage:	usage hint to TZ
 * @flags:	flags for the securing
 */
int msm_ion_secure_buffer(struct ion_client *client, struct ion_handle *handle,
			enum cp_mem_usage usage, int flags);

/**
 * msm_ion_unsecure_buffer - unsecure an individual buffer
 * @client:	client who has access to the buffer
 * @handle:	buffer to unsecure
 */
int msm_ion_unsecure_buffer(struct ion_client *client,
				struct ion_handle *handle);
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700401#else
Laura Abbottca2f5db2013-03-21 11:10:05 -0700402static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask,
403 const char *name)
404{
405 return ERR_PTR(-ENODEV);
406}
407
408static inline int ion_map_iommu(struct ion_client *client,
409 struct ion_handle *handle, int domain_num,
410 int partition_num, unsigned long align,
411 unsigned long iova_length, unsigned long *iova,
412 unsigned long *buffer_size,
413 unsigned long flags,
414 unsigned long iommu_flags)
415{
416 return -ENODEV;
417}
418
419static inline int ion_handle_get_size(struct ion_client *client,
420 struct ion_handle *handle, unsigned long *size)
421{
422 return -ENODEV;
423}
424
425static inline void ion_unmap_iommu(struct ion_client *client,
426 struct ion_handle *handle, int domain_num,
427 int partition_num)
428{
429 return;
430}
431
432static inline int ion_secure_heap(struct ion_device *dev, int heap_id,
433 int version, void *data)
434{
435 return -ENODEV;
436
437}
438
439static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id,
440 int version, void *data)
441{
442 return -ENODEV;
443}
444
445static inline void ion_mark_dangling_buffers_locked(struct ion_device *dev)
446{
447}
448
449static inline int msm_ion_do_cache_op(struct ion_client *client,
450 struct ion_handle *handle, void *vaddr,
451 unsigned long len, unsigned int cmd)
452{
453 return -ENODEV;
454}
455
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700456static inline int msm_ion_secure_heap(int heap_id)
457{
458 return -ENODEV;
459
460}
461
462static inline int msm_ion_unsecure_heap(int heap_id)
463{
464 return -ENODEV;
465}
466
467static inline int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage)
468{
469 return -ENODEV;
470}
471
472static inline int msm_ion_unsecure_heap_2_0(int heap_id,
473 enum cp_mem_usage usage)
474{
475 return -ENODEV;
476}
Mitchel Humpherys782653e2013-02-25 18:54:53 -0800477
478static inline int msm_ion_secure_buffer(struct ion_client *client,
479 struct ion_handle *handle,
480 enum cp_mem_usage usage,
481 int flags)
482{
483 return -ENODEV;
484}
485
486static inline int msm_ion_unsecure_buffer(struct ion_client *client,
487 struct ion_handle *handle)
488{
489 return -ENODEV;
490}
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700491#endif /* CONFIG_ION */
492
#endif /* __KERNEL__ */
494
/**
 * struct ion_flush_data - data passed to ion for flushing caches
 * @handle:	handle with data to flush
 * @fd:		fd to flush
 * @vaddr:	userspace virtual address mapped with mmap
 * @offset:	offset into the handle to flush
 * @length:	length of handle to flush
 *
 * Performs cache operations on the handle.  If p is the start address
 * of the handle, p + offset through p + offset + length will have the
 * cache operations performed.
 */
struct ion_flush_data {
	struct ion_handle *handle;
	int fd;
	void *vaddr;
	unsigned int offset;
	unsigned int length;
};
514
/**
 * struct ion_prefetch_data - argument for ION_IOC_PREFETCH/ION_IOC_DRAIN
 * @heap_id:	heap id to operate on
 * @len:	length of the request -- presumably in bytes; verify
 *		against the prefetch implementation
 */
struct ion_prefetch_data {
	int heap_id;
	unsigned long len;
};
519
/* ioctl magic number for the MSM-specific Ion commands below. */
#define ION_IOC_MSM_MAGIC 'M'

/**
 * DOC: ION_IOC_CLEAN_CACHES - clean the caches of the handle specified.
 */
#define ION_IOC_CLEAN_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 0, \
						struct ion_flush_data)
/**
 * DOC: ION_IOC_INV_CACHES - invalidate the caches of the handle specified.
 */
#define ION_IOC_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 1, \
						struct ion_flush_data)
/**
 * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches of the
 * handle specified.
 */
#define ION_IOC_CLEAN_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 2, \
						struct ion_flush_data)

#define ION_IOC_PREFETCH	_IOWR(ION_IOC_MSM_MAGIC, 3, \
						struct ion_prefetch_data)

#define ION_IOC_DRAIN		_IOWR(ION_IOC_MSM_MAGIC, 4, \
						struct ion_prefetch_data)
549
Laura Abbott6438e532012-07-20 10:10:41 -0700550#endif