blob: 7c5400475551fbc97cf222b6ea7a4d546c722f1e [file] [log] [blame]
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001/*
2 * include/linux/ion.h
3 *
4 * Copyright (C) 2011 Google, Inc.
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -08005 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07006 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#ifndef _LINUX_ION_H
19#define _LINUX_ION_H
20
Laura Abbottabcb6f72011-10-04 16:26:49 -070021#include <linux/ioctl.h>
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070022#include <linux/types.h>
23
struct ion_handle;
/**
 * enum ion_heap_type - list of all possible types of heaps
 * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
 * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
 * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
 *				 carveout heap, allocations are physically
 *				 contiguous
 * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
 * @ION_HEAP_TYPE_CUSTOM:	 first type id reserved for device specific
 *				 heaps
 * @ION_NUM_HEAPS:		 helper for iterating over heaps
 *
 * NOTE(review): an earlier comment also documented ION_HEAP_TYPE_IOMMU and
 * ION_HEAP_TYPE_CP, which are not members of this enum; they appear to be
 * platform-specific heap types (presumably in <mach/ion.h>) -- confirm
 * before relying on them here.
 */
enum ion_heap_type {
	ION_HEAP_TYPE_SYSTEM,
	ION_HEAP_TYPE_SYSTEM_CONTIG,
	ION_HEAP_TYPE_CARVEOUT,
	ION_HEAP_TYPE_DMA,
	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
				 are at the end of this enum */
	ION_NUM_HEAPS,
};

/* Convenience masks for building a heap_mask for ion_alloc(). */
#define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT)
#define ION_HEAP_TYPE_DMA_MASK		(1 << ION_HEAP_TYPE_DMA)

/**
 * heap flags - the lower 16 bits are used by core ion, the upper 16
 * bits are reserved for use by the heaps themselves.
 */
#define ION_FLAG_CACHED 1		/* mappings of this buffer should be
					   cached, ion will do cache
					   maintenance when the buffer is
					   mapped for dma */
#define ION_FLAG_CACHED_NEEDS_SYNC 2	/* mappings of this buffer will be
					   created at mmap time, if this is
					   set caches must be managed
					   manually */
Laura Abbotta2e93632011-08-19 13:36:32 -070065
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070066#ifdef __KERNEL__
Laura Abbott65576962011-10-31 12:13:25 -070067#include <linux/err.h>
Laura Abbottcffdff52011-09-23 10:40:19 -070068#include <mach/ion.h>
/* Opaque core objects; definitions live in drivers/gpu/ion. */
struct ion_device;
struct ion_heap;
struct ion_mapper;
struct ion_client;
struct ion_buffer;

/* This should be removed some day when phys_addr_t's are fully
   plumbed in the kernel, and all instances of ion_phys_addr_t should
   be converted to phys_addr_t.  For the time being many kernel interfaces
   do not accept phys_addr_t's, so this alias is kept. */
#define ion_phys_addr_t unsigned long
80
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070081/**
82 * struct ion_platform_heap - defines a heap in the given platform
83 * @type: type of the heap from ion_heap_type enum
Olav Hauganee0f7802011-12-19 13:28:57 -080084 * @id: unique identifier for heap. When allocating (lower numbers
Olav Hauganb5be7992011-11-18 14:29:02 -080085 * will be allocated from first)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070086 * @name: used for debug purposes
87 * @base: base address of heap in physical memory if applicable
88 * @size: size of the heap in bytes if applicable
Laura Abbottcaafeea2011-12-13 11:43:10 -080089 * @memory_type:Memory type used for the heap
Olav Haugan85c95402012-05-30 17:32:37 -070090 * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
Laura Abbottcaafeea2011-12-13 11:43:10 -080091 * @extra_data: Extra data specific to each heap type
Benjamin Gaignard8dff0a62012-06-25 15:30:18 -070092 * @priv: heap private data
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070093 */
94struct ion_platform_heap {
95 enum ion_heap_type type;
Rebecca Schultz Zavine6ee1242011-06-30 12:19:55 -070096 unsigned int id;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070097 const char *name;
98 ion_phys_addr_t base;
99 size_t size;
Laura Abbotta2e93632011-08-19 13:36:32 -0700100 enum ion_memory_types memory_type;
Olav Haugan85c95402012-05-30 17:32:37 -0700101 unsigned int has_outer_cache;
Olav Haugan0703dbf2011-12-19 17:53:38 -0800102 void *extra_data;
Benjamin Gaignard8dff0a62012-06-25 15:30:18 -0700103 void *priv;
Olav Haugan0703dbf2011-12-19 17:53:38 -0800104};
105
/**
 * struct ion_platform_data - array of platform heaps passed from board file
 * @has_outer_cache:	set to 1 if outer cache is used, 0 otherwise.
 * @nr:			number of structures in the array
 * @request_region:	function to be called when the number of allocations
 *			goes from 0 -> 1
 * @release_region:	function to be called when the number of allocations
 *			goes from 1 -> 0
 * @setup_region:	function to be called upon ion registration
 * @heaps:		array of platform_heap structures
 *
 * Provided by the board file in the form of platform data to a platform
 * device.
 */
struct ion_platform_data {
	unsigned int has_outer_cache;
	int nr;
	int (*request_region)(void *);
	int (*release_region)(void *);
	void *(*setup_region)(void);
	struct ion_platform_heap *heaps;
};
127
Jordan Crouse8cd48322011-10-12 17:05:19 -0600128#ifdef CONFIG_ION
129
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700130/**
Laura Abbottb14ed962012-01-30 14:18:08 -0800131 * ion_reserve() - reserve memory for ion heaps if applicable
132 * @data: platform data specifying starting physical address and
133 * size
134 *
135 * Calls memblock reserve to set aside memory for heaps that are
136 * located at specific memory addresses or of specfic sizes not
137 * managed by the kernel
138 */
139void ion_reserve(struct ion_platform_data *data);
140
141/**
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700142 * ion_client_create() - allocate a client and returns it
143 * @dev: the global ion device
144 * @heap_mask: mask of heaps this client can allocate from
145 * @name: used for debugging
146 */
147struct ion_client *ion_client_create(struct ion_device *dev,
148 unsigned int heap_mask, const char *name);
149
150/**
Laura Abbott302911d2011-08-15 17:12:57 -0700151 * msm_ion_client_create - allocate a client using the ion_device specified in
152 * drivers/gpu/ion/msm/msm_ion.c
153 *
154 * heap_mask and name are the same as ion_client_create, return values
155 * are the same as ion_client_create.
156 */
157
158struct ion_client *msm_ion_client_create(unsigned int heap_mask,
159 const char *name);
160
161/**
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700162 * ion_client_destroy() - free's a client and all it's handles
163 * @client: the client
164 *
165 * Free the provided client and all it's resources including
166 * any handles it is holding.
167 */
168void ion_client_destroy(struct ion_client *client);
169
170/**
171 * ion_alloc - allocate ion memory
172 * @client: the client
173 * @len: size of the allocation
174 * @align: requested allocation alignment, lots of hardware blocks have
175 * alignment requirements of some kind
Hanumant Singh7d72bad2012-08-29 18:39:44 -0700176 * @heap_mask: mask of heaps to allocate from, if multiple bits are set
Rebecca Schultz Zavine6ee1242011-06-30 12:19:55 -0700177 * heaps will be tried in order from lowest to highest order bit
Hanumant Singh7d72bad2012-08-29 18:39:44 -0700178 * @flags: heap flags, the low 16 bits are consumed by ion, the high 16
179 * bits are passed on to the respective heap and can be heap
180 * custom
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700181 *
182 * Allocate memory in one of the heaps provided in heap mask and return
183 * an opaque handle to it.
184 */
185struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
Hanumant Singh7d72bad2012-08-29 18:39:44 -0700186 size_t align, unsigned int heap_mask,
187 unsigned int flags);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700188
189/**
190 * ion_free - free a handle
191 * @client: the client
192 * @handle: the handle to free
193 *
194 * Free the provided handle.
195 */
196void ion_free(struct ion_client *client, struct ion_handle *handle);
197
198/**
199 * ion_phys - returns the physical address and len of a handle
200 * @client: the client
201 * @handle: the handle
202 * @addr: a pointer to put the address in
203 * @len: a pointer to put the length in
204 *
205 * This function queries the heap for a particular handle to get the
206 * handle's physical address. It't output is only correct if
207 * a heap returns physically contiguous memory -- in other cases
Laura Abbottb14ed962012-01-30 14:18:08 -0800208 * this api should not be implemented -- ion_sg_table should be used
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700209 * instead. Returns -EINVAL if the handle is invalid. This has
210 * no implications on the reference counting of the handle --
211 * the returned value may not be valid if the caller is not
212 * holding a reference.
213 */
214int ion_phys(struct ion_client *client, struct ion_handle *handle,
215 ion_phys_addr_t *addr, size_t *len);
216
217/**
Laura Abbottb14ed962012-01-30 14:18:08 -0800218 * ion_map_dma - return an sg_table describing a handle
219 * @client: the client
220 * @handle: the handle
221 *
222 * This function returns the sg_table describing
223 * a particular ion handle.
224 */
225struct sg_table *ion_sg_table(struct ion_client *client,
226 struct ion_handle *handle);
227
228/**
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700229 * ion_map_kernel - create mapping for the given handle
230 * @client: the client
231 * @handle: handle to map
232 *
233 * Map the given handle into the kernel and return a kernel address that
Mitchel Humpherysc4dba0a2012-11-05 14:06:18 -0800234 * can be used to access this address.
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700235 */
Mitchel Humpherys911b4b72012-09-12 14:42:50 -0700236void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700237
238/**
239 * ion_unmap_kernel() - destroy a kernel mapping for a handle
240 * @client: the client
241 * @handle: handle to unmap
242 */
243void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
244
245/**
Laura Abbottb14ed962012-01-30 14:18:08 -0800246 * ion_share_dma_buf() - given an ion client, create a dma-buf fd
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700247 * @client: the client
Laura Abbottb14ed962012-01-30 14:18:08 -0800248 * @handle: the handle
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700249 */
Laura Abbottb14ed962012-01-30 14:18:08 -0800250int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700251
252/**
Laura Abbottb14ed962012-01-30 14:18:08 -0800253 * ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700254 * @client: the client
Laura Abbottb14ed962012-01-30 14:18:08 -0800255 * @fd: the dma-buf fd
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700256 *
Laura Abbottb14ed962012-01-30 14:18:08 -0800257 * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf,
258 * import that fd and return a handle representing it. If a dma-buf from
259 * another exporter is passed in this function will return ERR_PTR(-EINVAL)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700260 */
Laura Abbottb14ed962012-01-30 14:18:08 -0800261struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
Laura Abbott273dd8e2011-10-12 14:26:33 -0700262
Laura Abbott273dd8e2011-10-12 14:26:33 -0700263/**
264 * ion_handle_get_flags - get the flags for a given handle
265 *
266 * @client - client who allocated the handle
267 * @handle - handle to get the flags
268 * @flags - pointer to store the flags
269 *
270 * Gets the current flags for a handle. These flags indicate various options
271 * of the buffer (caching, security, etc.)
272 */
273int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
274 unsigned long *flags);
275
Laura Abbott8c017362011-09-22 20:59:12 -0700276
277/**
278 * ion_map_iommu - map the given handle into an iommu
279 *
280 * @client - client who allocated the handle
281 * @handle - handle to map
282 * @domain_num - domain number to map to
283 * @partition_num - partition number to allocate iova from
284 * @align - alignment for the iova
285 * @iova_length - length of iova to map. If the iova length is
286 * greater than the handle length, the remaining
287 * address space will be mapped to a dummy buffer.
288 * @iova - pointer to store the iova address
289 * @buffer_size - pointer to store the size of the buffer
Mitchel Humpherys362b52b2012-09-13 10:53:22 -0700290 * @flags - flags for options to map
Olav Hauganb3676592012-03-02 15:02:25 -0800291 * @iommu_flags - flags specific to the iommu.
Laura Abbott8c017362011-09-22 20:59:12 -0700292 *
293 * Maps the handle into the iova space specified via domain number. Iova
294 * will be allocated from the partition specified via partition_num.
295 * Returns 0 on success, negative value on error.
296 */
297int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
298 int domain_num, int partition_num, unsigned long align,
299 unsigned long iova_length, unsigned long *iova,
300 unsigned long *buffer_size,
Olav Hauganb3676592012-03-02 15:02:25 -0800301 unsigned long flags, unsigned long iommu_flags);
Laura Abbott8c017362011-09-22 20:59:12 -0700302
303
304/**
305 * ion_handle_get_size - get the allocated size of a given handle
306 *
307 * @client - client who allocated the handle
308 * @handle - handle to get the size
309 * @size - pointer to store the size
310 *
311 * gives the allocated size of a handle. returns 0 on success, negative
312 * value on error
313 *
314 * NOTE: This is intended to be used only to get a size to pass to map_iommu.
315 * You should *NOT* rely on this for any other usage.
316 */
317
318int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
319 unsigned long *size);
320
321/**
322 * ion_unmap_iommu - unmap the handle from an iommu
323 *
324 * @client - client who allocated the handle
325 * @handle - handle to unmap
326 * @domain_num - domain to unmap from
327 * @partition_num - partition to unmap from
328 *
329 * Decrement the reference count on the iommu mapping. If the count is
330 * 0, the mapping will be removed from the iommu.
331 */
332void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
333 int domain_num, int partition_num);
334
335
Olav Haugan0a852512012-01-09 10:20:55 -0800336/**
337 * ion_secure_heap - secure a heap
338 *
339 * @client - a client that has allocated from the heap heap_id
340 * @heap_id - heap id to secure.
Laura Abbott7e446482012-06-13 15:59:39 -0700341 * @version - version of content protection
342 * @data - extra data needed for protection
Olav Haugan0a852512012-01-09 10:20:55 -0800343 *
344 * Secure a heap
345 * Returns 0 on success
346 */
Laura Abbott7e446482012-06-13 15:59:39 -0700347int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
348 void *data);
Olav Haugan0a852512012-01-09 10:20:55 -0800349
350/**
351 * ion_unsecure_heap - un-secure a heap
352 *
353 * @client - a client that has allocated from the heap heap_id
354 * @heap_id - heap id to un-secure.
Laura Abbott7e446482012-06-13 15:59:39 -0700355 * @version - version of content protection
356 * @data - extra data needed for protection
Olav Haugan0a852512012-01-09 10:20:55 -0800357 *
358 * Un-secure a heap
359 * Returns 0 on success
360 */
Laura Abbott7e446482012-06-13 15:59:39 -0700361int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
362 void *data);
Olav Haugan0a852512012-01-09 10:20:55 -0800363
364/**
Olav Haugan41f85792012-02-08 15:28:05 -0800365 * msm_ion_do_cache_op - do cache operations.
366 *
367 * @client - pointer to ION client.
368 * @handle - pointer to buffer handle.
369 * @vaddr - virtual address to operate on.
370 * @len - Length of data to do cache operation on.
371 * @cmd - Cache operation to perform:
372 * ION_IOC_CLEAN_CACHES
373 * ION_IOC_INV_CACHES
374 * ION_IOC_CLEAN_INV_CACHES
375 *
376 * Returns 0 on success
377 */
378int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
379 void *vaddr, unsigned long len, unsigned int cmd);
380
Jordan Crouse8cd48322011-10-12 17:05:19 -0600381#else
Laura Abbottb14ed962012-01-30 14:18:08 -0800382static inline void ion_reserve(struct ion_platform_data *data)
383{
384
385}
386
Jordan Crouse8cd48322011-10-12 17:05:19 -0600387static inline struct ion_client *ion_client_create(struct ion_device *dev,
388 unsigned int heap_mask, const char *name)
389{
390 return ERR_PTR(-ENODEV);
391}
Laura Abbott273dd8e2011-10-12 14:26:33 -0700392
Jordan Crouse8cd48322011-10-12 17:05:19 -0600393static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask,
394 const char *name)
395{
396 return ERR_PTR(-ENODEV);
397}
398
399static inline void ion_client_destroy(struct ion_client *client) { }
400
401static inline struct ion_handle *ion_alloc(struct ion_client *client,
Hanumant Singh7d72bad2012-08-29 18:39:44 -0700402 size_t len, size_t align,
403 unsigned int heap_mask,
404 unsigned int flags)
Jordan Crouse8cd48322011-10-12 17:05:19 -0600405{
406 return ERR_PTR(-ENODEV);
407}
408
409static inline void ion_free(struct ion_client *client,
410 struct ion_handle *handle) { }
411
412
413static inline int ion_phys(struct ion_client *client,
414 struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len)
415{
416 return -ENODEV;
417}
418
Laura Abbottb14ed962012-01-30 14:18:08 -0800419static inline struct sg_table *ion_sg_table(struct ion_client *client,
420 struct ion_handle *handle)
421{
422 return ERR_PTR(-ENODEV);
423}
424
Jordan Crouse8cd48322011-10-12 17:05:19 -0600425static inline void *ion_map_kernel(struct ion_client *client,
Mitchel Humpherysbaa86922012-11-02 16:35:39 -0700426 struct ion_handle *handle)
Jordan Crouse8cd48322011-10-12 17:05:19 -0600427{
428 return ERR_PTR(-ENODEV);
429}
430
431static inline void ion_unmap_kernel(struct ion_client *client,
432 struct ion_handle *handle) { }
433
Laura Abbottb14ed962012-01-30 14:18:08 -0800434static inline int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
Jordan Crouse8cd48322011-10-12 17:05:19 -0600435{
Laura Abbottb14ed962012-01-30 14:18:08 -0800436 return -ENODEV;
Jordan Crouse8cd48322011-10-12 17:05:19 -0600437}
438
Laura Abbottb14ed962012-01-30 14:18:08 -0800439static inline struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
Jordan Crouse8cd48322011-10-12 17:05:19 -0600440{
441 return ERR_PTR(-ENODEV);
442}
443
444static inline int ion_handle_get_flags(struct ion_client *client,
445 struct ion_handle *handle, unsigned long *flags)
446{
447 return -ENODEV;
448}
Laura Abbott8c017362011-09-22 20:59:12 -0700449
450static inline int ion_map_iommu(struct ion_client *client,
451 struct ion_handle *handle, int domain_num,
452 int partition_num, unsigned long align,
453 unsigned long iova_length, unsigned long *iova,
Olav Haugan9a27d4c2012-02-23 09:35:16 -0800454 unsigned long *buffer_size,
Olav Hauganb3676592012-03-02 15:02:25 -0800455 unsigned long flags,
456 unsigned long iommu_flags)
Laura Abbott8c017362011-09-22 20:59:12 -0700457{
458 return -ENODEV;
459}
460
Mitchel Humpherysbaa86922012-11-02 16:35:39 -0700461static inline int ion_handle_get_size(struct ion_client *client,
462 struct ion_handle *handle, unsigned long *size)
463{
464 return -ENODEV;
465}
466
Laura Abbott8c017362011-09-22 20:59:12 -0700467static inline void ion_unmap_iommu(struct ion_client *client,
468 struct ion_handle *handle, int domain_num,
469 int partition_num)
470{
471 return;
472}
473
Laura Abbott7e446482012-06-13 15:59:39 -0700474static inline int ion_secure_heap(struct ion_device *dev, int heap_id,
475 int version, void *data)
Olav Haugan0a852512012-01-09 10:20:55 -0800476{
477 return -ENODEV;
Laura Abbott8c017362011-09-22 20:59:12 -0700478
Olav Haugan0a852512012-01-09 10:20:55 -0800479}
480
Laura Abbott7e446482012-06-13 15:59:39 -0700481static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id,
482 int version, void *data)
Olav Haugan0a852512012-01-09 10:20:55 -0800483{
484 return -ENODEV;
485}
486
Mitchel Humpherysa75e4eb2012-12-14 16:12:23 -0800487static inline void ion_mark_dangling_buffers_locked(struct ion_device *dev)
488{
489}
490
Olav Haugan41f85792012-02-08 15:28:05 -0800491static inline int msm_ion_do_cache_op(struct ion_client *client,
492 struct ion_handle *handle, void *vaddr,
493 unsigned long len, unsigned int cmd)
494{
495 return -ENODEV;
496}
497
Jordan Crouse8cd48322011-10-12 17:05:19 -0600498#endif /* CONFIG_ION */
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700499#endif /* __KERNEL__ */
500
/**
 * DOC: Ion Userspace API
 *
 * create a client by opening /dev/ion
 * most operations handled via following ioctls
 *
 */
508
/**
 * struct ion_allocation_data - metadata passed from userspace for allocations
 * @len:	size of the allocation
 * @align:	required alignment of the allocation
 * @heap_mask:	mask of heaps to allocate from
 * @flags:	flags passed to heap
 * @handle:	pointer that will be populated with a cookie to use to refer
 *		to this allocation
 *
 * Provided by userspace as an argument to the ioctl
 */
struct ion_allocation_data {
	size_t len;
	size_t align;
	unsigned int heap_mask;
	unsigned int flags;
	struct ion_handle *handle;
};
527
/**
 * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
 * @handle:	a handle
 * @fd:		a file descriptor representing that handle
 *
 * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
 * the handle returned from ion alloc, and the kernel returns the file
 * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
 * provides the file descriptor and the kernel returns the handle.
 */
struct ion_fd_data {
	struct ion_handle *handle;
	int fd;
};
542
/**
 * struct ion_handle_data - a handle passed to/from the kernel
 * @handle:	a handle
 */
struct ion_handle_data {
	struct ion_handle *handle;
};
550
/**
 * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
 * @cmd:	the custom ioctl function to call
 * @arg:	additional data to pass to the custom ioctl, typically a user
 *		pointer to a predefined structure
 *
 * This works just like the regular cmd and arg fields of an ioctl.
 */
struct ion_custom_data {
	unsigned int cmd;
	unsigned long arg;
};
#define ION_IOC_MAGIC		'I'

/**
 * DOC: ION_IOC_ALLOC - allocate memory
 *
 * Takes an ion_allocation_data struct and returns it with the handle field
 * populated with the opaque handle for the allocation.
 */
#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \
				      struct ion_allocation_data)

/**
 * DOC: ION_IOC_FREE - free memory
 *
 * Takes an ion_handle_data struct and frees the handle.
 */
#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)

/**
 * DOC: ION_IOC_MAP - get a file descriptor to mmap
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be used as an argument to mmap.
 */
#define ION_IOC_MAP		_IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)

/**
 * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be passed to another process.  The corresponding opaque handle can
 * be retrieved via ION_IOC_IMPORT.
 */
#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)

/**
 * DOC: ION_IOC_IMPORT - imports a shared file descriptor
 *
 * Takes an ion_fd_data struct with the fd field populated with a valid file
 * descriptor obtained from ION_IOC_SHARE and returns the struct with the
 * handle field set to the corresponding opaque handle.
 */
#define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)

/**
 * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
 *
 * Deprecated in favor of using the dma_buf api's correctly (syncing
 * will happen automatically when the buffer is mapped to a device).
 * If necessary should be used after touching a cached buffer from the cpu,
 * this will make the buffer in memory coherent.
 */
#define ION_IOC_SYNC		_IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)

/**
 * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
 *
 * Takes the argument of the architecture specific ioctl to call and
 * passes appropriate userdata for that ioctl
 */
#define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
628
Laura Abbottabcb6f72011-10-04 16:26:49 -0700629
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700630#endif /* _LINUX_ION_H */