/*
 * include/linux/ion.h
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _LINUX_ION_H
#define _LINUX_ION_H

#include <linux/ioctl.h>
#include <linux/types.h>

struct ion_handle;
/**
 * enum ion_heap_type - list of all possible types of heaps
 * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
 * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
 * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
 *				 carveout heap, allocations are physically
 *				 contiguous
 * @ION_NUM_HEAPS:		 helper for iterating over heap types
 */
enum ion_heap_type {
	ION_HEAP_TYPE_SYSTEM,
	ION_HEAP_TYPE_SYSTEM_CONTIG,
	ION_HEAP_TYPE_CARVEOUT,
	ION_HEAP_TYPE_IOMMU,
	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
				 are at the end of this enum */
	ION_NUM_HEAPS,
};

#define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT)

/*
 * These are the only ids that should be used for Ion heaps.  The ids listed
 * are the order in which allocation will be attempted if specified.  Don't
 * swap the order of heap ids unless you know what you are doing!
 */

enum ion_heap_ids {
	ION_HEAP_SYSTEM_ID,
	ION_HEAP_SYSTEM_CONTIG_ID,
	ION_HEAP_EBI_ID,
	ION_HEAP_SMI_ID,
	ION_HEAP_ADSP_ID,
	ION_HEAP_AUDIO_ID,
	ION_HEAP_IOMMU_ID,
};

#define ION_KMALLOC_HEAP_NAME	"kmalloc"
#define ION_VMALLOC_HEAP_NAME	"vmalloc"
#define ION_EBI1_HEAP_NAME	"EBI1"
#define ION_ADSP_HEAP_NAME	"adsp"
#define ION_SMI_HEAP_NAME	"smi"
#define ION_IOMMU_HEAP_NAME	"iommu"

#define CACHED		1
#define UNCACHED	0

#define ION_CACHE_SHIFT	0

#define ION_SET_CACHE(__cache)	((__cache) << ION_CACHE_SHIFT)

#define ION_IS_CACHED(__flags)	((__flags) & (1 << ION_CACHE_SHIFT))
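
/*
 * Example (illustrative sketch, not part of the interface): composing mapping
 * flags with the cache macros above.  "client" and "handle" are assumed to
 * come from ion_client_create()/ion_alloc(), declared later in this file.
 *
 *	unsigned long flags = ION_SET_CACHE(CACHED);
 *	void *vaddr;
 *
 *	vaddr = ion_map_kernel(client, handle, flags);
 *	if (ION_IS_CACHED(flags))
 *		pr_debug("requested a cached mapping\n");
 */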

#ifdef __KERNEL__
#include <linux/err.h>
#include <mach/ion.h>
struct ion_device;
struct ion_heap;
struct ion_mapper;
struct ion_client;
struct ion_buffer;

/* This should be removed some day when phys_addr_t's are fully
   plumbed in the kernel, and all instances of ion_phys_addr_t should
   be converted to phys_addr_t.  For the time being many kernel interfaces
   do not yet accept phys_addr_t, so this cannot be converted yet. */
#define ion_phys_addr_t unsigned long

/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type:	type of the heap from ion_heap_type enum
 * @id:		unique identifier for the heap.  When allocating, lower
 *		numbered heap ids will be tried first.
 * @name:	used for debug purposes
 * @base:	base address of heap in physical memory if applicable
 * @size:	size of the heap in bytes if applicable
 * @memory_type: platform-specific memory type (enum ion_memory_types
 *		from <mach/ion.h>)
 * @request_region: function to be called when the number of allocations goes
 *			from 0 -> 1
 * @release_region: function to be called when the number of allocations goes
 *			from 1 -> 0
 * @setup_region: function to be called upon ion registration
 *
 * Provided by the board file.
 */
struct ion_platform_heap {
	enum ion_heap_type type;
	unsigned int id;
	const char *name;
	ion_phys_addr_t base;
	size_t size;
	enum ion_memory_types memory_type;
	int (*request_region)(void *);
	int (*release_region)(void *);
	void *(*setup_region)(void);
};

/**
 * struct ion_platform_data - array of platform heaps passed from board file
 * @nr:		number of structures in the array
 * @request_region: function to be called when the number of allocations goes
 *			from 0 -> 1
 * @release_region: function to be called when the number of allocations goes
 *			from 1 -> 0
 * @setup_region: function to be called upon ion registration
 * @heaps:	array of platform_heap structures
 *
 * Provided by the board file in the form of platform data to a platform device.
 */
struct ion_platform_data {
	int nr;
	int (*request_region)(void *);
	int (*release_region)(void *);
	void *(*setup_region)(void);
	struct ion_platform_heap heaps[];
};
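
/*
 * Example (illustrative sketch of a board file): describing one system heap
 * and one physically contiguous carveout heap.  The heap choice, base address
 * and size below are made up for illustration.
 *
 *	static struct ion_platform_data example_ion_pdata = {
 *		.nr = 2,
 *		.heaps = {
 *			{
 *				.type = ION_HEAP_TYPE_SYSTEM,
 *				.id = ION_HEAP_SYSTEM_ID,
 *				.name = ION_VMALLOC_HEAP_NAME,
 *			},
 *			{
 *				.type = ION_HEAP_TYPE_CARVEOUT,
 *				.id = ION_HEAP_EBI_ID,
 *				.name = ION_EBI1_HEAP_NAME,
 *				.base = 0x40000000,
 *				.size = SZ_8M,
 *			},
 *		},
 *	};
 *
 * (Initializing the flexible heaps[] array this way relies on a GCC
 * extension; the structure is then attached as platform data to the ion
 * platform device.)
 */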

#ifdef CONFIG_ION

/**
 * ion_client_create() - allocate a client and return it
 * @dev:	the global ion device
 * @heap_mask:	mask of heaps this client can allocate from
 * @name:	used for debugging
 */
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask, const char *name);

/**
 * msm_ion_client_create - allocate a client using the ion_device specified in
 *				drivers/gpu/ion/msm/msm_ion.c
 *
 * heap_mask and name are the same as ion_client_create, return values
 * are the same as ion_client_create.
 */
struct ion_client *msm_ion_client_create(unsigned int heap_mask,
					const char *name);

/**
 * ion_client_destroy() - free a client and all its handles
 * @client:	the client
 *
 * Free the provided client and all its resources including
 * any handles it is holding.
 */
void ion_client_destroy(struct ion_client *client);

/**
 * ion_alloc - allocate ion memory
 * @client:	the client
 * @len:	size of the allocation
 * @align:	requested allocation alignment, lots of hardware blocks have
 *		alignment requirements of some kind
 * @flags:	mask of heaps to allocate from; if multiple bits are set,
 *		heaps will be tried in order from lowest to highest order bit
 *
 * Allocate memory in one of the heaps provided in heap mask and return
 * an opaque handle to it.
 */
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags);

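/*
 * Example (illustrative sketch): a kernel driver allocating and later freeing
 * a buffer.  The driver name, sizes and heap choice are made up; error
 * handling is abbreviated.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = msm_ion_client_create(ION_HEAP_SYSTEM_MASK, "example-driver");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *
 *	handle = ion_alloc(client, SZ_64K, SZ_4K, ION_HEAP_SYSTEM_MASK);
 *	if (IS_ERR_OR_NULL(handle)) {
 *		ion_client_destroy(client);
 *		return PTR_ERR(handle);
 *	}
 *
 *	(use the handle, e.g. via ion_map_kernel() or ion_phys())
 *
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */
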
/**
 * ion_free - free a handle
 * @client:	the client
 * @handle:	the handle to free
 *
 * Free the provided handle.
 */
void ion_free(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_phys - returns the physical address and len of a handle
 * @client:	the client
 * @handle:	the handle
 * @addr:	a pointer to put the address in
 * @len:	a pointer to put the length in
 *
 * This function queries the heap for a particular handle to get the
 * handle's physical address.  Its output is only correct if
 * a heap returns physically contiguous memory -- in other cases
 * this api should not be implemented -- ion_map_dma should be used
 * instead.  Returns -EINVAL if the handle is invalid.  This has
 * no implications on the reference counting of the handle --
 * the returned value may not be valid if the caller is not
 * holding a reference.
 */
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len);

/**
 * ion_map_kernel - create mapping for the given handle
 * @client:	the client
 * @handle:	handle to map
 * @flags:	flags for this mapping
 *
 * Map the given handle into the kernel and return a kernel virtual address
 * that can be used to access the buffer.  If no flags are specified, this
 * will return a non-secure uncached mapping.
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
			unsigned long flags);

/**
 * ion_unmap_kernel() - destroy a kernel mapping for a handle
 * @client:	the client
 * @handle:	handle to unmap
 */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);

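/*
 * Example (illustrative sketch): mapping a handle into the kernel, touching
 * the memory and tearing the mapping down again.  "client" and "handle" are
 * assumed to come from ion_client_create()/ion_alloc() above, and the length
 * is made up.
 *
 *	void *vaddr;
 *
 *	vaddr = ion_map_kernel(client, handle, ION_SET_CACHE(UNCACHED));
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, SZ_64K);
 *	ion_unmap_kernel(client, handle);
 */
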
/**
 * ion_map_dma - create a dma mapping for a given handle
 * @client:	the client
 * @handle:	handle to map
 * @flags:	flags for this mapping
 *
 * Return an sglist describing the given handle
 */
struct scatterlist *ion_map_dma(struct ion_client *client,
				struct ion_handle *handle,
				unsigned long flags);

/**
 * ion_unmap_dma() - destroy a dma mapping for a handle
 * @client:	the client
 * @handle:	handle to unmap
 */
void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_share() - given a handle, obtain a buffer to pass to other clients
 * @client:	the client
 * @handle:	the handle to share
 *
 * Given a handle, return a buffer, which exists in a global name
 * space, and can be passed to other clients.  Should be passed into ion_import
 * to obtain a new handle for this buffer.
 *
 * NOTE: This function does not take an extra reference.  The burden is on the
 * caller to make sure the buffer doesn't go away while it's being passed to
 * another client.  That is, ion_free should not be called on this handle until
 * the buffer has been imported into the other client.
 */
struct ion_buffer *ion_share(struct ion_client *client,
			     struct ion_handle *handle);

/**
 * ion_import() - given a buffer in another client, import it
 * @client:	the client to import the buffer into
 * @buffer:	the buffer to import (as obtained from ion_share)
 *
 * Given a buffer, add it to the client and return the handle to use to refer
 * to it further.  This is called to share a handle from one kernel client to
 * another.
 */
struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer);

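/*
 * Example (illustrative sketch): passing a buffer from one kernel client to
 * another with ion_share()/ion_import().  "client_a", "client_b" and
 * "handle_a" are assumed to exist already; error handling is abbreviated.
 *
 *	struct ion_buffer *buffer;
 *	struct ion_handle *handle_b;
 *
 *	buffer = ion_share(client_a, handle_a);
 *	handle_b = ion_import(client_b, buffer);
 *
 * Only once handle_b has been obtained is it safe for client_a to call
 * ion_free() on handle_a; see the NOTE on ion_share() above.
 */
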
/**
 * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
 * @client:	the client to import the buffer into
 * @fd:		the fd
 *
 * A helper function for drivers that will be receiving ion buffers shared
 * with them from userspace.  These buffers are represented by a file
 * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
 * This function converts that fd into the underlying buffer, and returns
 * the handle to use to refer to it further.
 */
struct ion_handle *ion_import_fd(struct ion_client *client, int fd);

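/*
 * Example (illustrative sketch): a driver importing a buffer that userspace
 * shared via ION_IOC_SHARE and handed to the driver through its own ioctl.
 * "drv->ion_client" and "fd" are hypothetical names used for illustration.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_import_fd(drv->ion_client, fd);
 *	if (IS_ERR_OR_NULL(handle))
 *		return PTR_ERR(handle);
 */
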
/**
 * ion_handle_get_flags - get the flags for a given handle
 *
 * @client - client who allocated the handle
 * @handle - handle to get the flags
 * @flags - pointer to store the flags
 *
 * Gets the current flags for a handle.  These flags indicate various options
 * of the buffer (caching, security, etc.)
 */
int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
				unsigned long *flags);

/**
 * ion_map_iommu - map the given handle into an iommu
 *
 * @client - client who allocated the handle
 * @handle - handle to map
 * @domain_num - domain number to map to
 * @partition_num - partition number to allocate iova from
 * @align - alignment for the iova
 * @iova_length - length of iova to map.  If the iova length is
 *		greater than the handle length, the remaining
 *		address space will be mapped to a dummy buffer.
 * @iova - pointer to store the iova address
 * @buffer_size - pointer to store the size of the buffer
 * @flags - flags for options to map
 *
 * Maps the handle into the iova space specified via domain number.  Iova
 * will be allocated from the partition specified via partition_num.
 * Returns 0 on success, negative value on error.
 */
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags);

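/*
 * Example (illustrative sketch): mapping a handle into an iommu domain and
 * unmapping it again.  The domain and partition numbers are platform specific
 * and made up here; "client" and "handle" are assumed to exist already.
 *
 *	unsigned long size, iova, buffer_size;
 *	int ret;
 *
 *	ret = ion_handle_get_size(client, handle, &size);
 *	if (ret)
 *		return ret;
 *	ret = ion_map_iommu(client, handle, example_domain, example_partition,
 *			    SZ_4K, size, &iova, &buffer_size, 0);
 *	if (ret)
 *		return ret;
 *	(program the hardware with iova)
 *	ion_unmap_iommu(client, handle, example_domain, example_partition);
 */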

/**
 * ion_handle_get_size - get the allocated size of a given handle
 *
 * @client - client who allocated the handle
 * @handle - handle to get the size
 * @size - pointer to store the size
 *
 * Gives the allocated size of a handle.  Returns 0 on success, negative
 * value on error.
 *
 * NOTE: This is intended to be used only to get a size to pass to map_iommu.
 * You should *NOT* rely on this for any other usage.
 */
int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size);

/**
 * ion_unmap_iommu - unmap the handle from an iommu
 *
 * @client - client who allocated the handle
 * @handle - handle to unmap
 * @domain_num - domain to unmap from
 * @partition_num - partition to unmap from
 *
 * Decrement the reference count on the iommu mapping.  If the count is
 * 0, the mapping will be removed from the iommu.
 */
void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num);

#else
static inline struct ion_client *ion_client_create(struct ion_device *dev,
	unsigned int heap_mask, const char *name)
{
	return ERR_PTR(-ENODEV);
}

static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask,
	const char *name)
{
	return ERR_PTR(-ENODEV);
}

static inline void ion_client_destroy(struct ion_client *client) { }

static inline struct ion_handle *ion_alloc(struct ion_client *client,
	size_t len, size_t align, unsigned int flags)
{
	return ERR_PTR(-ENODEV);
}

static inline void ion_free(struct ion_client *client,
	struct ion_handle *handle) { }

static inline int ion_phys(struct ion_client *client,
	struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len)
{
	return -ENODEV;
}

static inline void *ion_map_kernel(struct ion_client *client,
	struct ion_handle *handle, unsigned long flags)
{
	return ERR_PTR(-ENODEV);
}

static inline void ion_unmap_kernel(struct ion_client *client,
	struct ion_handle *handle) { }

static inline struct scatterlist *ion_map_dma(struct ion_client *client,
	struct ion_handle *handle, unsigned long flags)
{
	return ERR_PTR(-ENODEV);
}

static inline void ion_unmap_dma(struct ion_client *client,
	struct ion_handle *handle) { }

static inline struct ion_buffer *ion_share(struct ion_client *client,
	struct ion_handle *handle)
{
	return ERR_PTR(-ENODEV);
}

static inline struct ion_handle *ion_import(struct ion_client *client,
	struct ion_buffer *buffer)
{
	return ERR_PTR(-ENODEV);
}

static inline struct ion_handle *ion_import_fd(struct ion_client *client,
	int fd)
{
	return ERR_PTR(-ENODEV);
}

static inline int ion_handle_get_flags(struct ion_client *client,
	struct ion_handle *handle, unsigned long *flags)
{
	return -ENODEV;
}

static inline int ion_map_iommu(struct ion_client *client,
	struct ion_handle *handle, int domain_num,
	int partition_num, unsigned long align,
	unsigned long iova_length, unsigned long *iova,
	unsigned long *buffer_size,
	unsigned long flags)
{
	return -ENODEV;
}
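
static inline int ion_handle_get_size(struct ion_client *client,
	struct ion_handle *handle, unsigned long *size)
{
	return -ENODEV;
}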

static inline void ion_unmap_iommu(struct ion_client *client,
	struct ion_handle *handle, int domain_num,
	int partition_num)
{
	return;
}

#endif /* CONFIG_ION */
#endif /* __KERNEL__ */

/**
 * DOC: Ion Userspace API
 *
 * Create a client by opening /dev/ion; most operations are handled via the
 * ioctls below.
 */

/**
 * struct ion_allocation_data - metadata passed from userspace for allocations
 * @len:	size of the allocation
 * @align:	required alignment of the allocation
 * @flags:	flags passed to heap
 * @handle:	pointer that will be populated with a cookie to use to refer
 *		to this allocation
 *
 * Provided by userspace as an argument to the ioctl
 */
struct ion_allocation_data {
	size_t len;
	size_t align;
	unsigned int flags;
	struct ion_handle *handle;
};

/**
 * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
 * @handle:	a handle
 * @fd:		a file descriptor representing that handle
 *
 * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
 * the handle returned from ion alloc, and the kernel returns the file
 * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
 * provides the file descriptor and the kernel returns the handle.
 */
struct ion_fd_data {
	struct ion_handle *handle;
	int fd;
};

/**
 * struct ion_handle_data - a handle passed to/from the kernel
 * @handle:	a handle
 */
struct ion_handle_data {
	struct ion_handle *handle;
};

/**
 * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
 * @cmd:	the custom ioctl function to call
 * @arg:	additional data to pass to the custom ioctl, typically a user
 *		pointer to a predefined structure
 *
 * This works just like the regular cmd and arg fields of an ioctl.
 */
struct ion_custom_data {
	unsigned int cmd;
	unsigned long arg;
};

/* struct ion_flush_data - data passed to ion for flushing caches
 *
 * @handle:	handle with data to flush
 * @fd:		fd to flush
 * @vaddr:	userspace virtual address mapped with mmap
 * @offset:	offset into the handle to flush
 * @length:	length of handle to flush
 *
 * Performs cache operations on the handle.  If p is the start address
 * of the handle, p + offset through p + offset + length will have
 * the cache operations performed
 */
struct ion_flush_data {
	struct ion_handle *handle;
	int fd;
	void *vaddr;
	unsigned int offset;
	unsigned int length;
};

/* struct ion_flag_data - information about flags for this buffer
 *
 * @handle:	handle to get flags from
 * @flags:	flags of this handle
 *
 * Takes handle as an input and outputs the flags from the handle
 * in the flag field.
 */
struct ion_flag_data {
	struct ion_handle *handle;
	unsigned long flags;
};

#define ION_IOC_MAGIC		'I'

/**
 * DOC: ION_IOC_ALLOC - allocate memory
 *
 * Takes an ion_allocation_data struct and returns it with the handle field
 * populated with the opaque handle for the allocation.
 */
#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \
				      struct ion_allocation_data)

/**
 * DOC: ION_IOC_FREE - free memory
 *
 * Takes an ion_handle_data struct and frees the handle.
 */
#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)

/**
 * DOC: ION_IOC_MAP - get a file descriptor to mmap
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be used as an argument to mmap.
 */
#define ION_IOC_MAP		_IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)

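/*
 * Example (illustrative sketch of userspace usage): allocating a buffer,
 * getting an fd for it with ION_IOC_MAP and mapping it with mmap.  Error
 * handling is abbreviated and the length/heap mask are made up.
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc_data = {
 *		.len = 4096,
 *		.align = 4096,
 *		.flags = ION_HEAP_SYSTEM_MASK,
 *	};
 *	struct ion_fd_data fd_data;
 *	struct ion_handle_data handle_data;
 *	void *ptr;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
 *	fd_data.handle = alloc_data.handle;
 *	ioctl(ion_fd, ION_IOC_MAP, &fd_data);
 *	ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd_data.fd, 0);
 *
 *	(use the memory at ptr)
 *
 *	munmap(ptr, 4096);
 *	close(fd_data.fd);
 *	handle_data.handle = alloc_data.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &handle_data);
 *	close(ion_fd);
 */
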
/**
 * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be passed to another process.  The corresponding opaque handle can
 * be retrieved via ION_IOC_IMPORT.
 */
#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)

/**
 * DOC: ION_IOC_IMPORT - imports a shared file descriptor
 *
 * Takes an ion_fd_data struct with the fd field populated with a valid file
 * descriptor obtained from ION_IOC_SHARE and returns the struct with the
 * handle field set to the corresponding opaque handle.
 */
#define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, int)

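/*
 * Example (illustrative sketch): sharing an allocation with another process.
 * Process A obtains a shareable fd and passes it to process B (e.g. over a
 * unix domain socket with SCM_RIGHTS); process B turns it back into a handle.
 * "alloc_data", "ion_fd" and "received_fd" are assumptions for illustration.
 *
 * Process A:
 *	struct ion_fd_data share_data = { .handle = alloc_data.handle };
 *
 *	ioctl(ion_fd, ION_IOC_SHARE, &share_data);
 *	(send share_data.fd to process B)
 *
 * Process B:
 *	struct ion_fd_data import_data = { .fd = received_fd };
 *
 *	ioctl(ion_fd, ION_IOC_IMPORT, &import_data);
 *	(import_data.handle now refers to the same buffer)
 */
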
/**
 * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
 *
 * Takes the argument of the architecture specific ioctl to call and
 * passes appropriate userdata for that ioctl
 */
#define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)

/**
 * DOC: ION_IOC_CLEAN_CACHES - clean the caches
 *
 * Clean the caches of the handle specified.
 */
#define ION_IOC_CLEAN_CACHES	_IOWR(ION_IOC_MAGIC, 7, \
						struct ion_flush_data)
/**
 * DOC: ION_IOC_INV_CACHES - invalidate the caches
 *
 * Invalidate the caches of the handle specified.
 */
#define ION_IOC_INV_CACHES	_IOWR(ION_IOC_MAGIC, 8, \
						struct ion_flush_data)
/**
 * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches
 *
 * Clean and invalidate the caches of the handle specified.
 */
#define ION_IOC_CLEAN_INV_CACHES	_IOWR(ION_IOC_MAGIC, 9, \
						struct ion_flush_data)
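
/*
 * Example (illustrative sketch): cleaning the caches for a buffer that was
 * mapped with mmap.  "alloc_data", "ptr" and "fd_data" are assumed to come
 * from the ION_IOC_ALLOC/ION_IOC_MAP/mmap sequence shown earlier.
 *
 *	struct ion_flush_data flush_data = {
 *		.handle = alloc_data.handle,
 *		.fd = fd_data.fd,
 *		.vaddr = ptr,
 *		.offset = 0,
 *		.length = 4096,
 *	};
 *
 *	ioctl(ion_fd, ION_IOC_CLEAN_CACHES, &flush_data);
 */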

/**
 * DOC: ION_IOC_GET_FLAGS - get the flags of the handle
 *
 * Gets the flags of the current handle which indicate cacheability,
 * secure state etc.
 */
#define ION_IOC_GET_FLAGS		_IOWR(ION_IOC_MAGIC, 10, \
						struct ion_flag_data)
#endif /* _LINUX_ION_H */