/*
 * drivers/staging/android/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>

#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#ifdef CONFIG_ION_POOL_CACHE_POLICY
#include <asm/cacheflush.h>
#endif

#include "ion.h"

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/**
 * struct mem_map_data - represents information about the memory map for a heap
 * @node:		list node used to store in the list of mem_map_data
 * @addr:		start address of memory region.
 * @addr_end:		end address of memory region.
 * @size:		size of memory region
 * @client_name:	name of the client who owns this buffer.
 *
 */
struct mem_map_data {
	struct list_head node;
	ion_phys_addr_t addr;
	ion_phys_addr_t addr_end;
	unsigned long size;
	const char *client_name;
};

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:		reference count
 * @node:		node in the ion_device buffers tree
 * @dev:		back pointer to the ion_device
 * @heap:		back pointer to the heap the buffer came from
 * @flags:		buffer specific flags
 * @private_flags:	internal buffer specific flags
 * @size:		size of the buffer
 * @priv_virt:		private data to the buffer representable as
 *			a void *
 * @priv_phys:		private data to the buffer representable as
 *			an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock:		protects the buffers cnt fields
 * @kmap_cnt:		number of times the buffer is mapped to the kernel
 * @vaddr:		the kernel mapping if kmap_cnt is not zero
 * @sg_table:		the sg table for the buffer. Note that if you need
 *			an sg_table for this buffer, you should likely be
 *			using Ion as a DMA Buf exporter and using
 *			dma_buf_map_attachment rather than trying to use this
 *			field directly.
 * @pages:		flat array of pages in the buffer -- used by fault
 *			handler and only valid for buffers that are faulted in
 * @vmas:		list of vma's mapping this buffer
 * @handle_count:	count of handles referencing this buffer
 * @task_comm:		taskcomm of last client to reference this buffer in a
 *			handle, used for debugging
 * @pid:		pid of last client to reference this buffer in a
 *			handle, used for debugging
 */
struct ion_buffer {
	struct kref ref;
	union {
		struct rb_node node;
		struct list_head list;
	};
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	unsigned long private_flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	struct sg_table *sg_table;
	struct page **pages;
	struct list_head vmas;
	/* used to track orphaned buffers */
	int handle_count;
	char task_comm[TASK_COMM_LEN];
	pid_t pid;
};
void ion_buffer_destroy(struct ion_buffer *buffer);

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @phys:		get physical address of a buffer (only define on
 *			physically contiguous heaps)
 * @map_dma:		map the memory for dma to a scatterlist
 * @unmap_dma:		unmap the memory for dma
 * @map_kernel:		map memory to the kernel
 * @unmap_kernel:	unmap memory from the kernel
 * @map_user:		map memory to userspace
 * @unmap_user:		unmap memory from userspace
 * @shrink:		shrink the heap's page caches; a @nr_to_scan of 0
 *			queries the reclaimable size
 * @print_debug:	emit heap specific debug info to a seq_file
 *
 * allocate, phys, and map_user return 0 on success, -errno on error.
 * map_dma and map_kernel return pointer on success, ERR_PTR on
 * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
 * the buffer's private_flags when called from a shrinker. In that
 * case, the pages being free'd must be truly free'd back to the
 * system, not put in a page pool or otherwise cached.
 */
struct ion_heap_ops {
	int (*allocate)(struct ion_heap *heap,
			struct ion_buffer *buffer, unsigned long len,
			unsigned long align, unsigned long flags);
	void (*free)(struct ion_buffer *buffer);
	int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
		    ion_phys_addr_t *addr, size_t *len);
	struct sg_table * (*map_dma)(struct ion_heap *heap,
				     struct ion_buffer *buffer);
	void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
			struct vm_area_struct *vma);
	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
	void (*unmap_user)(struct ion_heap *mapper, struct ion_buffer *buffer);
	int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
			   const struct list_head *mem_map);
};
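
/*
 * Example (illustrative only): a minimal heap would wire up these ops
 * roughly as below. The "example_*" functions are hypothetical and not
 * part of this file; the ion_heap_map_* helpers are declared later in
 * this header.
 *
 *	static struct ion_heap_ops example_heap_ops = {
 *		.allocate = example_heap_allocate,
 *		.free = example_heap_free,
 *		.map_dma = example_heap_map_dma,
 *		.unmap_dma = example_heap_unmap_dma,
 *		.map_kernel = ion_heap_map_kernel,
 *		.unmap_kernel = ion_heap_unmap_kernel,
 *		.map_user = ion_heap_map_user,
 *	};
 */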

/**
 * heap flags - flags between the heaps and core ion code
 */
#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)

/**
 * private flags - flags internal to ion
 */
/*
 * Buffer is being freed from a shrinker function. Skip any possible
 * heap-specific caching mechanism (e.g. page pools). Guarantees that
 * any buffer storage that came from the system allocator will be
 * returned to the system allocator.
 */
#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)

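/*
 * Example (illustrative only): a heap's free op honoring the flag. The
 * "example_*" helpers are hypothetical.
 *
 *	static void example_heap_free(struct ion_buffer *buffer)
 *	{
 *		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
 *			example_free_to_system(buffer);
 *		else
 *			example_recycle_to_pool(buffer);
 *	}
 */
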
/**
 * struct ion_heap - represents a heap in the system
 * @node:		plist node to put the heap on the device's priority
 *			list of heaps
 * @dev:		back pointer to the ion_device
 * @type:		type of heap
 * @ops:		ops struct as above
 * @flags:		flags
 * @id:			id of heap, also indicates priority of this heap when
 *			allocating. These are specified by platform data and
 *			MUST be unique
 * @name:		used for debugging
 * @shrinker:		a shrinker for the heap
 * @priv:		private heap data
 * @free_list:		free list head if deferred free is used
 * @free_list_size:	size of the deferred free list in bytes
 * @free_lock:		protects the free list
 * @waitqueue:		queue to wait on from deferred free thread
 * @task:		task struct of deferred free thread
 * @debug_show:		called when heap debug file is read to add any
 *			heap specific debug info to output
 * @total_allocated:	total bytes allocated from this heap, for debug
 *			accounting
 * @total_handles:	total bytes referenced by handles to buffers in this
 *			heap, for debug accounting
 *
 * Represents a pool of memory from which buffers can be made. In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct plist_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	unsigned long flags;
	unsigned int id;
	const char *name;
	struct shrinker shrinker;
	void *priv;
	struct list_head free_list;
	size_t free_list_size;
	spinlock_t free_lock;
	wait_queue_head_t waitqueue;
	struct task_struct *task;

	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
	atomic_long_t total_allocated;
	atomic_long_t total_handles;
};

/**
 * ion_buffer_cached - this ion buffer is cached
 * @buffer:		buffer
 *
 * indicates whether this ion buffer is cached
 */
bool ion_buffer_cached(struct ion_buffer *buffer);

/**
 * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
 * @buffer:		buffer
 *
 * indicates whether userspace mappings of this buffer will be faulted
 * in, this can affect how buffers are allocated from the heap.
 */
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);

/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl:	arch specific ioctl function if applicable
 *
 * returns a valid device or ERR_PTR(-errno) on failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg));

/**
 * ion_device_destroy - frees a device and its resources
 * @dev:		the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev:		the device
 * @heap:		the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);

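/*
 * Example (illustrative only): a typical bring-up sequence. The
 * "example_heap_data" platform data is hypothetical and error handling
 * is elided.
 *
 *	struct ion_device *idev = ion_device_create(msm_ion_custom_ioctl);
 *	struct ion_heap *heap = ion_heap_create(&example_heap_data);
 *
 *	if (!IS_ERR_OR_NULL(idev) && !IS_ERR_OR_NULL(heap))
 *		ion_device_add_heap(idev, heap);
 */
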
struct pages_mem {
	struct page **pages;
	u32 size;
	void (*free_fn)(const void *);
};

/**
 * some helpers for common operations on buffers using the sg_table
 * and vaddr fields
 */
void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
		      struct vm_area_struct *);
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);

int msm_ion_heap_high_order_page_zero(struct device *dev, struct page *page,
				      int order);
struct ion_heap *get_ion_heap(int heap_id);
int msm_ion_heap_sg_table_zero(struct device *dev, struct sg_table *sg,
			       size_t size);
int msm_ion_heap_pages_zero(struct page **pages, int num_pages);
int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem);
void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem);

long msm_ion_custom_ioctl(struct ion_client *client,
			  unsigned int cmd,
			  unsigned long arg);

int ion_heap_is_system_secure_heap_type(enum ion_heap_type type);
int get_secure_vmid(unsigned long flags);
int get_vmid(unsigned long flags);
bool is_secure_vmid_valid(int vmid);
unsigned int count_set_bits(unsigned long val);
int populate_vm_list(unsigned long flags, unsigned int *vm_list, int nelems);

/**
 * Functions to help assign/unassign sg_table for System Secure Heap
 */

int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid);
int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid);
int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *data);
int ion_system_secure_heap_drain(struct ion_heap *heap, void *data);

/**
 * ion_heap_init_shrinker
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
 * this function will be called to setup a shrinker to shrink the freelists
 * and call the heap's shrink op.
 */
void ion_heap_init_shrinker(struct ion_heap *heap);

/**
 * ion_heap_init_deferred_free -- initialize deferred free functionality
 * @heap:		the heap
 *
 * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
 * be called to setup deferred frees. Calls to free the buffer will
 * return immediately and the actual free will occur some time later
 */
int ion_heap_init_deferred_free(struct ion_heap *heap);

/**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap:		the heap
 * @buffer:		the buffer
 *
 * Adds an item to the deferred freelist.
 */
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);

/**
 * ion_heap_freelist_drain - drain the deferred free list
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately. Returns the total amount freed. The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

/**
 * ion_heap_freelist_drain_from_shrinker - drain the deferred free
 *					list, skipping any heap-specific
 *					pooling or caching mechanisms
 *
 * @heap:		the heap
 * @size:		amount of memory to drain in bytes
 *
 * Drains the indicated amount of memory from the deferred freelist
 * immediately. Returns the total amount freed. The total freed may be
 * higher depending on the size of the items in the list, or lower if
 * there is insufficient total memory on the freelist.
 *
 * Unlike with @ion_heap_freelist_drain, don't put any pages back into
 * page pools or otherwise cache the pages. Everything must be
 * genuinely free'd back to the system. If you're free'ing from a
 * shrinker you probably want to use this. Note that this relies on
 * the heap.ops.free callback honoring the
 * ION_PRIV_FLAG_SHRINKER_FREE flag.
 */
size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap,
					     size_t size);

/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
 * @heap:		the heap
 */
size_t ion_heap_freelist_size(struct ion_heap *heap);

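/*
 * Example (illustrative only): a heap's shrink op built on the freelist
 * helpers above; "example_heap_shrink" is hypothetical.
 *
 *	static int example_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 *				       int nr_to_scan)
 *	{
 *		size_t freed;
 *
 *		if (!nr_to_scan)
 *			return ion_heap_freelist_size(heap) / PAGE_SIZE;
 *
 *		freed = ion_heap_freelist_drain_from_shrinker(
 *				heap, nr_to_scan * PAGE_SIZE);
 *		return freed / PAGE_SIZE;
 *	}
 */
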
/**
 * functions for creating and destroying the built in ion heaps.
 * architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
void ion_chunk_heap_destroy(struct ion_heap *);
#ifdef CONFIG_CMA
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
void ion_cma_heap_destroy(struct ion_heap *);
#else
static inline struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *h)
{
	return NULL;
}

static inline void ion_cma_heap_destroy(struct ion_heap *h) {}
#endif

struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *heap);
void ion_system_secure_heap_destroy(struct ion_heap *heap);

struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap);
void ion_cma_secure_heap_destroy(struct ion_heap *heap);

/**
 * kernel api to allocate/free from carveout -- used when carveout is
 * used to back an architecture specific custom heap
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
				      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size);
/**
 * The carveout heap returns physical addresses. Since 0 may be a valid
 * physical address, this value is used to indicate allocation failure.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL -1

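/*
 * Example (illustrative only): allocating from a carveout-backed custom
 * heap and checking for failure.
 *
 *	ion_phys_addr_t paddr;
 *
 *	paddr = ion_carveout_allocate(heap, PAGE_SIZE, PAGE_SIZE);
 *	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
 *		return -ENOMEM;
 *	...
 *	ion_carveout_free(heap, paddr, PAGE_SIZE);
 */
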
/**
 * functions for creating and destroying a heap pool -- allows you
 * to keep a pool of pre allocated memory to use from your heap. Keeping
 * a pool of memory that is ready for dma, i.e. any cached mappings have been
 * invalidated from the cache, provides a significant performance benefit on
 * many systems
 */

/**
 * struct ion_page_pool - pagepool struct
 * @high_count:		number of highmem items in the pool
 * @low_count:		number of lowmem items in the pool
 * @high_items:		list of highmem items
 * @low_items:		list of lowmem items
 * @mutex:		lock protecting this struct, and especially the count
 *			item lists
 * @dev:		device used for cache maintenance of pooled pages
 * @gfp_mask:		gfp_mask to use from alloc
 * @order:		order of pages in the pool
 * @list:		plist node for list of pools
 *
 * Allows you to keep a pool of pre allocated pages to use from your heap.
 * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
 * have been invalidated from the cache, provides a significant performance
 * benefit on many systems
 */
struct ion_page_pool {
	int high_count;
	int low_count;
	struct list_head high_items;
	struct list_head low_items;
	struct mutex mutex;
	struct device *dev;
	gfp_t gfp_mask;
	unsigned int order;
	struct plist_node list;
};

struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
					   unsigned int order);
void ion_page_pool_destroy(struct ion_page_pool *pool);
void *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool);
void *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool);
void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
void ion_page_pool_free_immediate(struct ion_page_pool *pool,
				  struct page *page);
int ion_page_pool_total(struct ion_page_pool *pool, bool high);
size_t ion_system_heap_secure_page_pool_total(struct ion_heap *heap, int vmid);

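/*
 * Example (illustrative only): a simple allocate/recycle cycle using the
 * page pool API declared above.
 *
 *	bool from_pool;
 *	struct page *page;
 *
 *	page = ion_page_pool_alloc(pool, &from_pool);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	ion_page_pool_free(pool, page);	/- returns the page to the pool -/
 */
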
#ifdef CONFIG_ION_POOL_CACHE_POLICY
static inline void ion_page_pool_alloc_set_cache_policy(
		struct ion_page_pool *pool, struct page *page)
{
	void *va = page_address(page);

	if (va)
		set_memory_wc((unsigned long)va, 1 << pool->order);
}

static inline void ion_page_pool_free_set_cache_policy(
		struct ion_page_pool *pool, struct page *page)
{
	void *va = page_address(page);

	if (va)
		set_memory_wb((unsigned long)va, 1 << pool->order);
}
#else
static inline void ion_page_pool_alloc_set_cache_policy(
		struct ion_page_pool *pool, struct page *page)
{
}

static inline void ion_page_pool_free_set_cache_policy(
		struct ion_page_pool *pool, struct page *page)
{
}
#endif

/**
 * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
 * @pool:		the pool
 * @gfp_mask:		the memory type to reclaim
 * @nr_to_scan:		number of items to shrink in pages
 *
 * returns the number of items freed in pages
 */
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
			 int nr_to_scan);

/**
 * ion_pages_sync_for_device - cache flush pages for use with the specified
 *			       device
 * @dev:		the device the pages will be used with
 * @page:		the first page to be flushed
 * @size:		size in bytes of region to be flushed
 * @dir:		direction of dma transfer
 */
void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir);

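/*
 * Example (illustrative only): flushing freshly allocated pages before
 * handing them to a device for DMA.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 *	if (page)
 *		ion_pages_sync_for_device(dev, page, PAGE_SIZE,
 *					  DMA_BIDIRECTIONAL);
 */
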
int ion_walk_heaps(struct ion_client *client, int heap_id,
		   enum ion_heap_type type, void *data,
		   int (*f)(struct ion_heap *heap, void *data));

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id);

int ion_handle_put(struct ion_handle *handle);

void show_ion_usage(struct ion_device *dev);

#endif /* _ION_PRIV_H */