/*
 * drivers/gpu/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/ion.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>

#include "msm_ion_priv.h"
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:		reference count
 * @node:		node in the ion_device buffers tree
 * @dev:		back pointer to the ion_device
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		private data to the buffer representable as
 *			a void *
 * @priv_phys:		private data to the buffer representable as
 *			an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock:		protects the buffers cnt fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt:		number of times the buffer is mapped for dma
 * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
 * @dirty:		bitmask representing which pages of this buffer have
 *			been dirtied by the cpu and need cache maintenance
 *			before dma
 * @vmas:		list of vma's mapping this buffer
 * @handle_count:	count of handles referencing this buffer
 * @task_comm:		taskcomm of last client to reference this buffer in a
 *			handle, used for debugging
 * @pid:		pid of last client to reference this buffer in a
 *			handle, used for debugging
 */
struct ion_buffer {
	struct kref ref;
	union {
		struct rb_node node;
		struct list_head list;
	};
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	int dmap_cnt;
	struct sg_table *sg_table;
	unsigned long *dirty;
	struct list_head vmas;
	/* used to track orphaned buffers */
	int handle_count;
	char task_comm[TASK_COMM_LEN];
	pid_t pid;
};

void ion_buffer_destroy(struct ion_buffer *buffer);

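/*
 * Example (illustrative sketch, not part of this header): the @ref field
 * is intended for kref-style lifetime management, with
 * ion_buffer_destroy() run once the last reference drops.  The wrapper
 * names below are hypothetical:
 *
 *	static void example_buffer_release(struct kref *kref)
 *	{
 *		struct ion_buffer *buffer =
 *			container_of(kref, struct ion_buffer, ref);
 *
 *		ion_buffer_destroy(buffer);
 *	}
 *
 *	static void example_buffer_put(struct ion_buffer *buffer)
 *	{
 *		kref_put(&buffer->ref, example_buffer_release);
 *	}
 */
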
/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory.  Will be called with
 *			ION_FLAG_FREED_FROM_SHRINKER set in buffer flags when
 *			called from a shrinker.  In that case, the pages being
 *			freed must be truly freed back to the system, not put
 *			in a page pool or otherwise cached.
 * @phys:		get physical address of a buffer (only define on
 *			physically contiguous heaps)
 * @map_dma:		map the memory for dma to a scatterlist
 * @unmap_dma:		unmap the memory for dma
 * @map_kernel:		map memory to the kernel
 * @unmap_kernel:	unmap memory from the kernel
 * @map_user:		map memory to userspace
 * @unmap_user:		unmap memory from userspace
 */
struct ion_heap_ops {
	int (*allocate)(struct ion_heap *heap,
			struct ion_buffer *buffer, unsigned long len,
			unsigned long align, unsigned long flags);
	void (*free)(struct ion_buffer *buffer);
	int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
		    ion_phys_addr_t *addr, size_t *len);
	struct sg_table *(*map_dma)(struct ion_heap *heap,
				    struct ion_buffer *buffer);
	void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
	void *(*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
			struct vm_area_struct *vma);
	void (*unmap_user)(struct ion_heap *mapper, struct ion_buffer *buffer);
	int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
			   const struct list_head *mem_map);
	int (*secure_heap)(struct ion_heap *heap, int version, void *data);
	int (*unsecure_heap)(struct ion_heap *heap, int version, void *data);
	int (*secure_buffer)(struct ion_buffer *buffer, int version,
			     void *data, int flags);
	int (*unsecure_buffer)(struct ion_buffer *buffer, int force_unsecure);
};

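/*
 * Example (hypothetical skeleton, for illustration only): a heap fills
 * in the subset of ops it supports and leaves the rest NULL.  A
 * physically contiguous heap might reuse the generic helpers declared
 * later in this header for kernel/user mappings.  All my_* names are
 * invented:
 *
 *	static struct ion_heap_ops my_contig_heap_ops = {
 *		.allocate	= my_contig_allocate,
 *		.free		= my_contig_free,
 *		.phys		= my_contig_phys,
 *		.map_dma	= my_contig_map_dma,
 *		.unmap_dma	= my_contig_unmap_dma,
 *		.map_kernel	= ion_heap_map_kernel,
 *		.unmap_kernel	= ion_heap_unmap_kernel,
 *		.map_user	= ion_heap_map_user,
 *	};
 */
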
/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)

/**
 * struct ion_heap - represents a heap in the system
 * @node:		plist node to put the heap on the device's priority
 *			list of heaps
 * @dev:		back pointer to the ion_device
 * @type:		type of heap
 * @ops:		ops struct as above
 * @flags:		flags
 * @id:			id of heap, also indicates priority of this heap when
 *			allocating.  These are specified by platform data and
 *			MUST be unique
 * @name:		used for debugging
 * @shrinker:		a shrinker for the heap.  If the heap caches system
 *			memory, it must define a shrinker to return it on low
 *			memory conditions; this includes system memory cached
 *			in the deferred free lists for heaps that support it
 * @priv:		private heap data
 * @free_list:		free list head if deferred free is used
 * @free_list_size:	size of the deferred free list in bytes
 * @lock:		protects the free list
 * @waitqueue:		queue to wait on from deferred free thread
 * @task:		task struct of deferred free thread
 * @debug_show:		called when heap debug file is read to add any
 *			heap specific debug info to output
 *
 * Represents a pool of memory from which buffers can be made.  In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct plist_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	unsigned long flags;
	unsigned int id;
	const char *name;
	struct shrinker shrinker;
	void *priv;
	struct list_head free_list;
	size_t free_list_size;
	struct rt_mutex lock;
	wait_queue_head_t waitqueue;
	struct task_struct *task;
	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
};

/**
 * ion_buffer_cached - this ion buffer is cached
 * @buffer:		buffer
 *
 * indicates whether this ion buffer is cached
 */
bool ion_buffer_cached(struct ion_buffer *buffer);

/**
 * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
 * @buffer:		buffer
 *
 * indicates whether userspace mappings of this buffer will be faulted
 * in; this can affect how buffers are allocated from the heap.
 */
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);

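/*
 * Example (illustrative sketch): a heap's allocate op might branch on
 * this to decide whether it must prepare for page-by-page faulting or
 * can hand out one large mapping up front.  The my_* helper names are
 * invented:
 *
 *	static int my_heap_allocate(struct ion_heap *heap,
 *				    struct ion_buffer *buffer,
 *				    unsigned long len, unsigned long align,
 *				    unsigned long flags)
 *	{
 *		if (ion_buffer_fault_user_mappings(buffer))
 *			return my_alloc_order_zero_pages(buffer, len);
 *		return my_alloc_large_pages(buffer, len);
 *	}
 */
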
/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl:	arch specific ioctl function if applicable
 *
 * returns a valid device or an ERR_PTR value on failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg));

/**
 * ion_device_destroy - free a device and its resources
 * @dev:		the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev:		the device
 * @heap:		the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);

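/*
 * Example (hypothetical sketch of probe-time wiring; the my_* names
 * are invented and a NULL custom_ioctl is assumed to be acceptable):
 *
 *	static struct ion_device *my_idev;
 *
 *	static int my_ion_probe(struct ion_platform_heap *heap_data)
 *	{
 *		struct ion_heap *heap;
 *
 *		my_idev = ion_device_create(NULL);
 *		if (IS_ERR(my_idev))
 *			return PTR_ERR(my_idev);
 *
 *		heap = ion_heap_create(heap_data);
 *		if (IS_ERR(heap)) {
 *			ion_device_destroy(my_idev);
 *			return PTR_ERR(heap);
 *		}
 *
 *		ion_device_add_heap(my_idev, heap);
 *		return 0;
 *	}
 */
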
struct pages_mem {
	struct page **pages;
	void (*free_fn)(const void *);
};

/**
 * some helpers for common operations on buffers using the sg_table
 * and vaddr fields
 */
void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
		      struct vm_area_struct *);
int ion_heap_pages_zero(struct page **pages, int num_pages,
			bool should_invalidate);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_high_order_page_zero(struct page *page,
				  int order, bool should_invalidate);

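/*
 * Example (illustrative sketch): a heap that fills in buffer->sg_table
 * can lean on these helpers instead of rolling its own zeroing logic,
 * e.g. in its free op (my_release_pages is invented):
 *
 *	static void my_heap_free(struct ion_buffer *buffer)
 *	{
 *		if (ion_heap_buffer_zero(buffer))
 *			pr_warn("failed to zero buffer before free\n");
 *		my_release_pages(buffer);
 *	}
 */
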
/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
 * be called to set up deferred frees.  Calls to free the buffer will
 * return immediately and the actual free will occur some time later.
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);

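/*
 * Example (hypothetical sketch): a heap opting in to deferred free
 * typically sets the flag at creation time and then starts the free
 * thread (the my_* names are invented):
 *
 *	struct ion_heap *my_heap_create(struct ion_platform_heap *data)
 *	{
 *		struct ion_heap *heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 *
 *		if (!heap)
 *			return ERR_PTR(-ENOMEM);
 *		heap->ops = &my_heap_ops;
 *		heap->flags = ION_HEAP_FLAG_DEFER_FREE;
 *		if (ion_heap_init_deferred_free(heap)) {
 *			kfree(heap);
 *			return ERR_PTR(-ENOMEM);
 *		}
 *		return heap;
 *	}
 */
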
/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap:		the heap
 * @buffer:		the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately.  Returns the total amount freed.  The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_drain_from_shrinker - drain the deferred free
 *					   list, skipping any heap-specific
 *					   pooling or caching mechanisms
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately.  Returns the total amount freed.  The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 *
 * Unlike ion_heap_freelist_drain(), this does not put any pages back
 * into page pools or otherwise cache them.  Everything must be
 * genuinely freed back to the system.  If you're freeing from a
 * shrinker you probably want to use this.  Note that this relies on
 * the heap.ops.free callback honoring the
 * ION_FLAG_FREED_FROM_SHRINKER flag.
 */
size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap,
					     size_t size);

/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap:		the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);

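/*
 * Example (illustrative sketch, assuming the single-callback shrinker
 * API of kernels from this era, where shrink() returns the remaining
 * object count):
 *
 *	static int my_heap_shrink(struct shrinker *shrinker,
 *				  struct shrink_control *sc)
 *	{
 *		struct ion_heap *heap =
 *			container_of(shrinker, struct ion_heap, shrinker);
 *		size_t nr_bytes = (size_t)sc->nr_to_scan << PAGE_SHIFT;
 *
 *		if (nr_bytes)
 *			ion_heap_freelist_drain_from_shrinker(heap, nr_bytes);
 *		return ion_heap_freelist_size(heap) >> PAGE_SHIFT;
 *	}
 */
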
/**
 * functions for creating and destroying the built in ion heaps.
 * architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
void ion_chunk_heap_destroy(struct ion_heap *);

/**
 * kernel api to allocate/free from carveout -- used when carveout is
 * used to back an architecture specific custom heap
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
				      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size);

/**
 * The carveout heap returns physical addresses; since 0 may be a valid
 * physical address, this is used to indicate allocation failure.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL -1

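/*
 * Example (illustrative sketch): a custom heap backed by a carveout
 * checks for the sentinel value rather than for zero (the my_* name
 * is invented):
 *
 *	static int my_custom_allocate(struct ion_heap *heap,
 *				      struct ion_buffer *buffer,
 *				      unsigned long len, unsigned long align,
 *				      unsigned long flags)
 *	{
 *		ion_phys_addr_t paddr =
 *			ion_carveout_allocate(heap, len, align);
 *
 *		if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
 *			return -ENOMEM;
 *		buffer->priv_phys = paddr;
 *		return 0;
 *	}
 */
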
/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of pre-allocated memory to use from your heap.  Keeping
 * a pool of memory that is ready for dma, i.e. any cached mapping has been
 * invalidated from the cache, provides a significant performance benefit on
 * many systems
 */

/**
 * struct ion_page_pool - pagepool struct
 * @high_count:		number of highmem items in the pool
 * @low_count:		number of lowmem items in the pool
 * @high_items:		list of highmem items
 * @low_items:		list of lowmem items
 * @mutex:		lock protecting this struct and especially the count
 *			item list
 * @gfp_mask:		gfp_mask to use for allocations
 * @order:		order of pages in the pool
 * @list:		plist node for list of pools
 * @should_invalidate:	whether or not the cache needs to be invalidated at
 *			page allocation time.
 *
 * Allows you to keep a pool of pre-allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, i.e. any cached mapping
 * has been invalidated from the cache, provides a significant performance
 * benefit on many systems.
 */
struct ion_page_pool {
	int high_count;
	int low_count;
	struct list_head high_items;
	struct list_head low_items;
	struct mutex mutex;
	gfp_t gfp_mask;
	unsigned int order;
	struct plist_node list;
	bool should_invalidate;
};

struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
					   bool should_invalidate);
void ion_page_pool_destroy(struct ion_page_pool *);
void *ion_page_pool_alloc(struct ion_page_pool *);
void ion_page_pool_free(struct ion_page_pool *, struct page *);

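/*
 * Example (illustrative sketch): a heap keeping one pool per page order
 * might allocate from the pool first and fall back to the page
 * allocator only on a miss (the my_* name is invented):
 *
 *	static struct page *my_alloc_from_pool(struct ion_page_pool *pool)
 *	{
 *		struct page *page = ion_page_pool_alloc(pool);
 *
 *		if (!page)
 *			page = alloc_pages(pool->gfp_mask, pool->order);
 *		return page;
 *	}
 */
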
/**
 * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool:		the pool
 * @gfp_mask:		the memory type to reclaim
 * @nr_to_scan:		number of items to shrink in pages
 *
 * returns the number of items freed in pages
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
			 int nr_to_scan);

#endif /* _ION_PRIV_H */