/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2018, 2020, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/msm_ion.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <trace/events/kmem.h>


#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev: the actual misc device
 * @buffers: an rb tree of all the existing buffers
 * @buffer_lock: lock protecting the tree of buffers
 * @lock: rwsem protecting the tree of heaps and clients
 * @heaps: list of all the heaps in the system
 * @custom_ioctl: hook for device-specific ioctls
 * @clients: an rb tree of all the clients created from userspace
 * @debug_root: root debugfs dentry for ion
 * @heaps_debug_root: debugfs directory holding the per-heap entries
 * @clients_debug_root: debugfs directory holding the per-client entries
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	/* Protects rb_tree */
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node: node in the tree of all clients
 * @dev: backpointer to ion device
 * @handles: an rb tree of all the handles in this client
 * @idr: an idr space for allocating handle ids
 * @lock: lock protecting the tree of handles and idr
 * @name: used for debugging
 * @display_name: used for debugging (unique version of @name)
 * @display_serial: used for debugging (to make display_name unique)
 * @task: used for debugging
 * @pid: pid of the client task, used for debugging
 * @debug_root: debugfs dentry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref: reference count
 * @user_ref_count: number of references held on behalf of userspace
 * @client: back pointer to the client the buffer resides in
 * @buffer: pointer to the buffer
 * @node: node in the client's handle rbtree
 * @kmap_cnt: count of times this client has mapped to kernel
 * @id: client-unique id allocated by client->idr
 *
 * Modifications to node or kmap_cnt should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	unsigned int user_ref_count;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

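/*
 * For buffers whose pages are faulted in on demand, the low bit of each
 * struct page pointer stored in buffer->pages doubles as a dirty flag;
 * page pointers are at least word-aligned, so bit 0 is otherwise unused.
 * The helpers below strip the tag, test it, set it, and clear it.
 */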
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

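	/*
	 * If the first attempt fails and the heap defers frees, memory may
	 * still be tied up in buffers queued for deferred freeing; drain
	 * the freelist and retry the allocation once.
	 */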
	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(!table,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here. However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}

	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	atomic_long_add(len, &heap->total_allocated);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	atomic_long_sub(buffer->size, &buffer->heap->total_allocated);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	msm_dma_buf_freed(buffer);

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	if (buffer->handle_count == 0)
		atomic_long_add(buffer->size, &buffer->heap->total_handles);

	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer. At this
	 * point there will be no way to track what processes this buffer is
	 * being used by; it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
		atomic_long_sub(buffer->size, &buffer->heap->total_handles);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *ion_handle_get_check_overflow(
						struct ion_handle *handle)
{
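	/*
	 * Refuse to take another reference if the refcount would wrap
	 * to zero; a wrapped count would free the handle while it is
	 * still in use.
	 */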
	if (atomic_read(&handle->ref.refcount) + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	ion_handle_get(handle);
	return handle;
}

static int ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret;

	ret = kref_put(&handle->ref, ion_handle_destroy);

	return ret;
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

/* Must hold the client lock */
static void user_ion_handle_get(struct ion_handle *handle)
{
	if (handle->user_ref_count++ == 0)
		kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *user_ion_handle_get_check_overflow(
						struct ion_handle *handle)
{
	if (handle->user_ref_count + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	user_ion_handle_get(handle);
	return handle;
}

/*
 * Passes a kref to the user ref count.
 * We know we're holding a kref to the object before and
 * after this call, so no need to reverify handle.
 */
static struct ion_handle *pass_to_user(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	struct ion_handle *ret;

	mutex_lock(&client->lock);
	ret = user_ion_handle_get_check_overflow(handle);
	ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);
	return ret;
}

/* Must hold the client lock */
static int user_ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret = 0;

	if (--handle->user_ref_count == 0)
		ret = ion_handle_put_nolock(handle);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		return ion_handle_get_check_overflow(handle);

	return ERR_PTR(-EINVAL);
}

bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

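	/* handle ids are allocated starting at 1, so 0 is never a valid id */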
	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

static struct ion_handle *__ion_alloc(
		struct ion_client *client, size_t len,
		size_t align, unsigned int heap_id_mask,
		unsigned int flags, bool grab_handle)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * For now, we don't want to fault in pages individually since
	 * clients are already doing manual cache maintenance. In
	 * other words, the implicit caching infrastructure is in
	 * place (in code) but should not be used.
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		trace_ion_alloc_buffer_start(client->name, heap->name, len,
			heap_id_mask, flags, client->pid, current->comm,
			current->pid, (void *)buffer);
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		trace_ion_alloc_buffer_end(client->name, heap->name, len,
			heap_id_mask, flags, client->pid, current->comm,
			current->pid, (void *)buffer);
		if (!IS_ERR(buffer))
			break;

		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
						heap_id_mask, flags,
						PTR_ERR(buffer));
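		/*
		 * Record the name of each heap that was tried and failed,
		 * for the allocation failure message below.
		 */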
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left;
			int ret_value;

			len_left = MAX_DBG_STR_LEN - dbg_str_idx - 1;
			ret_value = snprintf(&dbg_str[dbg_str_idx],
					     len_left, "%s ", heap->name);

			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
			}
		}
	}
	up_read(&dev->lock);

	if (!buffer) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags, -ENODEV);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
					    heap_id_mask, flags,
					    PTR_ERR(buffer));
		pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s\n",
			 len, align, dbg_str, client->name);
		return ERR_CAST(buffer);
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	if (grab_handle)
		ion_handle_get(handle);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	return __ion_alloc(client, len, align, heap_id_mask, flags, false);
}
EXPORT_SYMBOL(ion_alloc);

void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	WARN_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

static void user_ion_free_nolock(struct ion_client *client,
				 struct ion_handle *handle)
{
	bool valid_handle;

	WARN_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	if (handle->user_ref_count == 0) {
		WARN(1, "%s: User does not have access!\n", __func__);
		return;
	}
	trace_ion_free_buffer(client->name, client->pid, current->comm,
			      current->pid, (void *)handle->buffer,
			      handle->buffer->size);
	user_ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);
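
/*
 * Example (sketch) of the in-kernel client API, assuming a registered
 * struct ion_device "idev" and a valid heap id mask "mask" for the
 * desired heap (both hypothetical names); error handling elided:
 *
 *	struct ion_client *client = ion_client_create(idev, "my-driver");
 *	struct ion_handle *handle = ion_alloc(client, SZ_64K, 0, mask,
 *					      ION_FLAG_CACHED);
 *	...
 *	ion_free(client, handle);
 */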

static int __ion_phys(struct ion_client *client, struct ion_handle *handle,
		      ion_phys_addr_t *addr, size_t *len, bool lock_client)
{
	struct ion_buffer *buffer;
	int ret;

	if (lock_client)
		mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		if (lock_client)
			mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
		       __func__, buffer->heap->name, buffer->heap->type);
		if (lock_client)
			mutex_unlock(&client->lock);
		return -ENODEV;
	}
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	if (lock_client)
		mutex_unlock(&client->lock);
	return ret;
}

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	return __ion_phys(client, handle, addr, len, true);
}
EXPORT_SYMBOL(ion_phys);

int ion_phys_nolock(struct ion_client *client, struct ion_handle *handle,
		    ion_phys_addr_t *addr, size_t *len)
{
	return __ion_phys(client, handle, addr, len, false);
}

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
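
/*
 * Example (sketch), continuing the allocation example above: map the
 * buffer into the kernel, use it, and drop the mapping. The buffer's
 * heap must implement map_kernel:
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *	if (!IS_ERR(vaddr)) {
 *		memset(vaddr, 0, SZ_64K);
 *		ion_unmap_kernel(client, handle);
 *	}
 */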

static struct rb_root *ion_root_client;

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
		   "heap_name", "size_in_bytes", "handle refcount",
		   "buffer");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);

		seq_printf(s, "%16.16s: %16zx : %16d : %12pK",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		seq_puts(s, "\n");
	}
	mutex_unlock(&client->lock);
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);

	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

Mitchel Humpherys | 2803ac7 | 2014-02-17 13:58:37 -0800 | [diff] [blame] | 1001 | err_free_client_name: |
| 1002 | kfree(client->name); |
Mitchel Humpherys | ae5cbf4 | 2014-02-17 13:58:36 -0800 | [diff] [blame] | 1003 | err_free_client: |
| 1004 | kfree(client); |
| 1005 | err_put_task_struct: |
| 1006 | if (task) |
| 1007 | put_task_struct(current->group_leader); |
| 1008 | return ERR_PTR(-ENOMEM); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1009 | } |
Johan Mossberg | 9122fe8 | 2013-12-13 14:24:29 -0800 | [diff] [blame] | 1010 | EXPORT_SYMBOL(ion_client_create); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1011 | |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1012 | void ion_client_destroy(struct ion_client *client) |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1013 | { |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1014 | struct ion_device *dev = client->dev; |
| 1015 | struct rb_node *n; |
| 1016 | |
Patrick Daly | 60f0d9a | 2017-06-30 17:16:21 -0700 | [diff] [blame] | 1017 | down_write(&dev->lock); |
| 1018 | rb_erase(&client->node, &dev->clients); |
| 1019 | up_write(&dev->lock); |
| 1020 | |
| 1021 | /* After this completes, there are no more references to client */ |
| 1022 | debugfs_remove_recursive(client->debug_root); |
| 1023 | |
| 1024 | mutex_lock(&client->lock); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1025 | while ((n = rb_first(&client->handles))) { |
| 1026 | struct ion_handle *handle = rb_entry(n, struct ion_handle, |
| 1027 | node); |
| 1028 | ion_handle_destroy(&handle->ref); |
| 1029 | } |
Patrick Daly | 60f0d9a | 2017-06-30 17:16:21 -0700 | [diff] [blame] | 1030 | mutex_unlock(&client->lock); |
Colin Cross | 47b4045 | 2013-12-13 14:24:50 -0800 | [diff] [blame] | 1031 | |
Colin Cross | 47b4045 | 2013-12-13 14:24:50 -0800 | [diff] [blame] | 1032 | idr_destroy(&client->idr); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1033 | if (client->task) |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1034 | put_task_struct(client->task); |
Mitchel Humpherys | 2803ac7 | 2014-02-17 13:58:37 -0800 | [diff] [blame] | 1035 | kfree(client->display_name); |
Mitchel Humpherys | ae5cbf4 | 2014-02-17 13:58:36 -0800 | [diff] [blame] | 1036 | kfree(client->name); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1037 | kfree(client); |
| 1038 | } |
Olav Haugan | ee4c8aa | 2013-12-13 14:23:55 -0800 | [diff] [blame] | 1039 | EXPORT_SYMBOL(ion_client_destroy); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1040 | |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 1041 | int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle, |
| 1042 | unsigned long *flags) |
| 1043 | { |
| 1044 | struct ion_buffer *buffer; |
| 1045 | |
| 1046 | mutex_lock(&client->lock); |
| 1047 | if (!ion_handle_validate(client, handle)) { |
| 1048 | pr_err("%s: invalid handle passed\n",
| 1049 | __func__);
| 1050 | mutex_unlock(&client->lock); |
| 1051 | return -EINVAL; |
| 1052 | } |
| 1053 | buffer = handle->buffer; |
| 1054 | mutex_lock(&buffer->lock); |
| 1055 | *flags = buffer->flags; |
| 1056 | mutex_unlock(&buffer->lock); |
| 1057 | mutex_unlock(&client->lock); |
| 1058 | |
| 1059 | return 0; |
| 1060 | } |
| 1061 | EXPORT_SYMBOL(ion_handle_get_flags); |
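
/*
 * Hedged caller-side sketch (not part of the original file): a driver
 * can use this to decide whether CPU cache maintenance is needed, e.g.
 *
 *	unsigned long flags;
 *
 *	if (!ion_handle_get_flags(client, handle, &flags) &&
 *	    (flags & ION_FLAG_CACHED))
 *		;	/* buffer is CPU-cached; sync before device access */
 */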
| 1062 | |
| 1063 | int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle, |
| 1064 | size_t *size) |
| 1065 | { |
| 1066 | struct ion_buffer *buffer; |
| 1067 | |
| 1068 | mutex_lock(&client->lock); |
| 1069 | if (!ion_handle_validate(client, handle)) { |
| 1070 | pr_err("%s: invalid handle passed\n",
| 1071 | __func__);
| 1072 | mutex_unlock(&client->lock); |
| 1073 | return -EINVAL; |
| 1074 | } |
| 1075 | buffer = handle->buffer; |
| 1076 | mutex_lock(&buffer->lock); |
| 1077 | *size = buffer->size; |
| 1078 | mutex_unlock(&buffer->lock); |
| 1079 | mutex_unlock(&client->lock); |
| 1080 | |
| 1081 | return 0; |
| 1082 | } |
| 1083 | EXPORT_SYMBOL(ion_handle_get_size); |
| 1084 | |
Laura Abbott | 29defcc | 2014-08-01 16:13:40 -0700 | [diff] [blame] | 1085 | /** |
| 1086 | * ion_sg_table - get an sg_table for the buffer |
| 1087 | * |
| 1088 | * NOTE: most likely you should NOT be using this API.
| 1089 | * You should be using Ion as a DMA Buf exporter and using |
| 1090 | * the sg_table returned by dma_buf_map_attachment. |
| 1091 | */ |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1092 | struct sg_table *ion_sg_table(struct ion_client *client, |
| 1093 | struct ion_handle *handle) |
| 1094 | { |
| 1095 | struct ion_buffer *buffer; |
| 1096 | struct sg_table *table; |
| 1097 | |
| 1098 | mutex_lock(&client->lock); |
| 1099 | if (!ion_handle_validate(client, handle)) { |
| 1100 | pr_err("%s: invalid handle passed to map_dma.\n", |
| 1101 | __func__); |
| 1102 | mutex_unlock(&client->lock); |
| 1103 | return ERR_PTR(-EINVAL); |
| 1104 | } |
| 1105 | buffer = handle->buffer; |
| 1106 | table = buffer->sg_table; |
| 1107 | mutex_unlock(&client->lock); |
| 1108 | return table; |
| 1109 | } |
| 1110 | EXPORT_SYMBOL(ion_sg_table); |
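
/*
 * Hedged sketch of the preferred dma-buf path the NOTE above refers to
 * (assumes a struct device *dev and an ion-exported struct dma_buf
 * *dmabuf; not part of the original file):
 *
 *	struct dma_buf_attachment *att;
 *	struct sg_table *sgt;
 *
 *	att = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(att))
 *		return PTR_ERR(att);
 *	sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt)) {
 *		dma_buf_detach(dmabuf, att);
 *		return PTR_ERR(sgt);
 *	}
 *	... use sgt for DMA ...
 *	dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, att);
 */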
| 1111 | |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 1112 | struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base, |
| 1113 | size_t chunk_size, |
| 1114 | size_t total_size) |
| 1115 | { |
| 1116 | struct sg_table *table; |
| 1117 | int i, n_chunks, ret; |
| 1118 | struct scatterlist *sg; |
| 1119 | |
| 1120 | table = kzalloc(sizeof(*table), GFP_KERNEL); |
| 1121 | if (!table) |
| 1122 | return ERR_PTR(-ENOMEM); |
| 1123 | |
| 1124 | n_chunks = DIV_ROUND_UP(total_size, chunk_size); |
| 1125 | pr_debug("creating sg_table with %d chunks\n", n_chunks); |
| 1126 | |
| 1127 | ret = sg_alloc_table(table, n_chunks, GFP_KERNEL); |
| 1128 | if (ret) |
| 1129 | goto err0; |
| 1130 | |
| 1131 | for_each_sg(table->sgl, sg, table->nents, i) { |
| 1132 | dma_addr_t addr = buffer_base + i * chunk_size; |
| 1133 | |
| 1134 | sg_dma_address(sg) = addr; |
| 1135 | sg->length = chunk_size; |
| 1136 | } |
| 1137 | |
| 1138 | return table; |
| 1139 | err0: |
| 1140 | kfree(table); |
| 1141 | return ERR_PTR(ret); |
| 1142 | } |
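
/*
 * Hedged usage sketch (hypothetical heap code, not from this file): a
 * carveout-style heap can describe a physically contiguous region in
 * fixed-size pieces, e.g.
 *
 *	table = ion_create_chunked_sg_table(base, SZ_64K, buffer->size);
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 *
 * Note that only the DMA addresses and lengths are filled in; the
 * entries carry no struct page backing.
 */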
| 1143 | |
Laura Abbott | 29defcc | 2014-08-01 16:13:40 -0700 | [diff] [blame] | 1144 | static struct sg_table *ion_dupe_sg_table(struct sg_table *orig_table) |
| 1145 | { |
| 1146 | int ret, i; |
| 1147 | struct scatterlist *sg, *sg_orig; |
| 1148 | struct sg_table *table; |
| 1149 | |
| 1150 | table = kzalloc(sizeof(*table), GFP_KERNEL); |
| 1151 | if (!table) |
| 1152 | return NULL; |
| 1153 | |
| 1154 | ret = sg_alloc_table(table, orig_table->nents, GFP_KERNEL); |
| 1155 | if (ret) { |
| 1156 | kfree(table); |
| 1157 | return NULL; |
| 1158 | } |
| 1159 | |
| 1160 | sg_orig = orig_table->sgl; |
| 1161 | for_each_sg(table->sgl, sg, table->nents, i) { |
| 1162 | memcpy(sg, sg_orig, sizeof(*sg)); |
| 1163 | sg_orig = sg_next(sg_orig); |
| 1164 | } |
| 1165 | return table; |
| 1166 | } |
| 1167 | |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1168 | static void ion_buffer_sync_for_device(struct ion_buffer *buffer, |
| 1169 | struct device *dev, |
| 1170 | enum dma_data_direction direction); |
| 1171 | |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1172 | static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, |
| 1173 | enum dma_data_direction direction) |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1174 | { |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1175 | struct dma_buf *dmabuf = attachment->dmabuf; |
| 1176 | struct ion_buffer *buffer = dmabuf->priv; |
Laura Abbott | 29defcc | 2014-08-01 16:13:40 -0700 | [diff] [blame] | 1177 | struct sg_table *table; |
| 1178 | |
| 1179 | table = ion_dupe_sg_table(buffer->sg_table); |
| 1180 | if (!table) |
| 1181 | return NULL; |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1182 | |
Rebecca Schultz Zavin | 0b9ec1c | 2013-12-13 14:23:52 -0800 | [diff] [blame] | 1183 | ion_buffer_sync_for_device(buffer, attachment->dev, direction); |
Laura Abbott | 29defcc | 2014-08-01 16:13:40 -0700 | [diff] [blame] | 1184 | return table; |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1185 | } |
| 1186 | |
| 1187 | static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, |
| 1188 | struct sg_table *table, |
| 1189 | enum dma_data_direction direction) |
| 1190 | { |
Laura Abbott | 29defcc | 2014-08-01 16:13:40 -0700 | [diff] [blame] | 1191 | sg_free_table(table); |
| 1192 | kfree(table); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1193 | } |
| 1194 | |
Colin Cross | e946b20 | 2013-12-13 14:25:01 -0800 | [diff] [blame] | 1195 | void ion_pages_sync_for_device(struct device *dev, struct page *page, |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1196 | size_t size, enum dma_data_direction dir) |
Colin Cross | e946b20 | 2013-12-13 14:25:01 -0800 | [diff] [blame] | 1197 | { |
| 1198 | struct scatterlist sg; |
| 1199 | |
Laura Abbott | 29defcc | 2014-08-01 16:13:40 -0700 | [diff] [blame] | 1200 | WARN_ONCE(!dev, "A device is required for dma_sync\n"); |
| 1201 | |
Colin Cross | e946b20 | 2013-12-13 14:25:01 -0800 | [diff] [blame] | 1202 | sg_init_table(&sg, 1); |
| 1203 | sg_set_page(&sg, page, size, 0); |
| 1204 | /* |
| 1205 | * This is not correct - sg_dma_address needs a dma_addr_t that is valid |
Tapasweni Pathak | 8e4ec4f | 2014-10-06 11:26:39 +0530 | [diff] [blame] | 1206 | * for the targeted device, but this works on the currently targeted |
Colin Cross | e946b20 | 2013-12-13 14:25:01 -0800 | [diff] [blame] | 1207 | * hardware. |
| 1208 | */ |
| 1209 | sg_dma_address(&sg) = page_to_phys(page); |
| 1210 | dma_sync_sg_for_device(dev, &sg, 1, dir); |
| 1211 | } |
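
/*
 * Hedged heap-side sketch (not from this file): heaps typically call
 * this after zeroing freshly allocated pages, so the writes reach memory
 * before a device sees the buffer, e.g.
 *
 *	for_each_sg(table->sgl, sg, table->nents, i)
 *		ion_pages_sync_for_device(dev, sg_page(sg), sg->length,
 *					  DMA_BIDIRECTIONAL);
 */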
| 1212 | |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1213 | struct ion_vma_list { |
| 1214 | struct list_head list; |
| 1215 | struct vm_area_struct *vma; |
| 1216 | }; |
| 1217 | |
| 1218 | static void ion_buffer_sync_for_device(struct ion_buffer *buffer, |
| 1219 | struct device *dev, |
| 1220 | enum dma_data_direction dir) |
| 1221 | { |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1222 | struct ion_vma_list *vma_list; |
Rebecca Schultz Zavin | c13bd1c | 2013-12-13 14:24:45 -0800 | [diff] [blame] | 1223 | int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; |
| 1224 | int i; |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1225 | |
Rebecca Schultz Zavin | 13ba780 | 2013-12-13 14:24:06 -0800 | [diff] [blame] | 1226 | if (!ion_buffer_fault_user_mappings(buffer)) |
Rebecca Schultz Zavin | 0b9ec1c | 2013-12-13 14:23:52 -0800 | [diff] [blame] | 1227 | return; |
| 1228 | |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1229 | mutex_lock(&buffer->lock); |
Rebecca Schultz Zavin | c13bd1c | 2013-12-13 14:24:45 -0800 | [diff] [blame] | 1230 | for (i = 0; i < pages; i++) { |
| 1231 | struct page *page = buffer->pages[i]; |
| 1232 | |
| 1233 | if (ion_buffer_page_is_dirty(page)) |
Colin Cross | e946b20 | 2013-12-13 14:25:01 -0800 | [diff] [blame] | 1234 | ion_pages_sync_for_device(dev, ion_buffer_page(page), |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1235 | PAGE_SIZE, dir); |
Colin Cross | e946b20 | 2013-12-13 14:25:01 -0800 | [diff] [blame] | 1236 | |
Rebecca Schultz Zavin | c13bd1c | 2013-12-13 14:24:45 -0800 | [diff] [blame] | 1237 | ion_buffer_page_clean(buffer->pages + i); |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1238 | } |
| 1239 | list_for_each_entry(vma_list, &buffer->vmas, list) { |
| 1240 | struct vm_area_struct *vma = vma_list->vma; |
| 1241 | |
| 1242 | zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, |
| 1243 | NULL); |
| 1244 | } |
| 1245 | mutex_unlock(&buffer->lock); |
| 1246 | } |
| 1247 | |
Colin Cross | f63958d | 2013-12-13 19:26:28 -0800 | [diff] [blame] | 1248 | static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1249 | { |
| 1250 | struct ion_buffer *buffer = vma->vm_private_data; |
Colin Cross | 462be0c6 | 2013-12-13 19:26:24 -0800 | [diff] [blame] | 1251 | unsigned long pfn; |
Rebecca Schultz Zavin | c13bd1c | 2013-12-13 14:24:45 -0800 | [diff] [blame] | 1252 | int ret; |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1253 | |
| 1254 | mutex_lock(&buffer->lock); |
Rebecca Schultz Zavin | c13bd1c | 2013-12-13 14:24:45 -0800 | [diff] [blame] | 1255 | ion_buffer_page_dirty(buffer->pages + vmf->pgoff); |
Rebecca Schultz Zavin | c13bd1c | 2013-12-13 14:24:45 -0800 | [diff] [blame] | 1256 | BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); |
Colin Cross | 462be0c6 | 2013-12-13 19:26:24 -0800 | [diff] [blame] | 1257 | |
| 1258 | pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); |
| 1259 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1260 | mutex_unlock(&buffer->lock); |
Rebecca Schultz Zavin | c13bd1c | 2013-12-13 14:24:45 -0800 | [diff] [blame] | 1261 | if (ret) |
| 1262 | return VM_FAULT_ERROR; |
| 1263 | |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1264 | return VM_FAULT_NOPAGE; |
| 1265 | } |
| 1266 | |
| 1267 | static void ion_vm_open(struct vm_area_struct *vma) |
| 1268 | { |
| 1269 | struct ion_buffer *buffer = vma->vm_private_data; |
| 1270 | struct ion_vma_list *vma_list; |
| 1271 | |
Ben Marsh | 411059f | 2016-03-28 19:26:19 +0200 | [diff] [blame] | 1272 | vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL); |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1273 | if (!vma_list) |
| 1274 | return; |
| 1275 | vma_list->vma = vma; |
| 1276 | mutex_lock(&buffer->lock); |
| 1277 | list_add(&vma_list->list, &buffer->vmas); |
| 1278 | mutex_unlock(&buffer->lock); |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1279 | } |
| 1280 | |
| 1281 | static void ion_vm_close(struct vm_area_struct *vma) |
| 1282 | { |
| 1283 | struct ion_buffer *buffer = vma->vm_private_data; |
| 1284 | struct ion_vma_list *vma_list, *tmp; |
| 1285 | |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1286 | mutex_lock(&buffer->lock); |
| 1287 | list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) { |
| 1288 | if (vma_list->vma != vma) |
| 1289 | continue; |
| 1290 | list_del(&vma_list->list); |
| 1291 | kfree(vma_list); |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1292 | break; |
| 1293 | } |
| 1294 | mutex_unlock(&buffer->lock); |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 1295 | |
| 1296 | if (buffer->heap->ops->unmap_user) |
| 1297 | buffer->heap->ops->unmap_user(buffer->heap, buffer); |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1298 | } |
| 1299 | |
Kirill A. Shutemov | 7cbea8d | 2015-09-09 15:39:26 -0700 | [diff] [blame] | 1300 | static const struct vm_operations_struct ion_vma_ops = { |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1301 | .open = ion_vm_open, |
| 1302 | .close = ion_vm_close, |
| 1303 | .fault = ion_vm_fault, |
| 1304 | }; |
| 1305 | |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1306 | static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1307 | { |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1308 | struct ion_buffer *buffer = dmabuf->priv; |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1309 | int ret = 0; |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1310 | |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1311 | if (!buffer->heap->ops->map_user) { |
Iulia Manda | 7287bb5 | 2014-03-11 20:10:37 +0200 | [diff] [blame] | 1312 | pr_err("%s: this heap does not define a method for mapping to userspace\n", |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1313 | __func__); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1314 | return -EINVAL; |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1315 | } |
| 1316 | |
Rebecca Schultz Zavin | 13ba780 | 2013-12-13 14:24:06 -0800 | [diff] [blame] | 1317 | if (ion_buffer_fault_user_mappings(buffer)) { |
Colin Cross | 462be0c6 | 2013-12-13 19:26:24 -0800 | [diff] [blame] | 1318 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | |
| 1319 | VM_DONTDUMP; |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1320 | vma->vm_private_data = buffer; |
| 1321 | vma->vm_ops = &ion_vma_ops; |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 1322 | vma->vm_flags |= VM_MIXEDMAP; |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1323 | ion_vm_open(vma); |
Rebecca Schultz Zavin | 856661d | 2013-12-13 14:24:05 -0800 | [diff] [blame] | 1324 | return 0; |
Rebecca Schultz Zavin | 56a7c18 | 2013-12-13 14:23:50 -0800 | [diff] [blame] | 1325 | } |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1326 | |
Rebecca Schultz Zavin | 856661d | 2013-12-13 14:24:05 -0800 | [diff] [blame] | 1327 | if (!(buffer->flags & ION_FLAG_CACHED)) |
| 1328 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); |
| 1329 | |
| 1330 | mutex_lock(&buffer->lock); |
| 1331 | /* now map it to userspace */ |
| 1332 | ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); |
| 1333 | mutex_unlock(&buffer->lock); |
| 1334 | |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1335 | if (ret) |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1336 | pr_err("%s: failure mapping buffer to userspace\n", |
| 1337 | __func__); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1338 | |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1339 | return ret; |
| 1340 | } |
| 1341 | |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1342 | static void ion_dma_buf_release(struct dma_buf *dmabuf) |
| 1343 | { |
| 1344 | struct ion_buffer *buffer = dmabuf->priv; |
Seunghun Lee | 10f6286 | 2014-05-01 01:30:23 +0900 | [diff] [blame] | 1345 | |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1346 | ion_buffer_put(buffer); |
| 1347 | } |
| 1348 | |
| 1349 | static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) |
| 1350 | { |
Rebecca Schultz Zavin | 0f34faf | 2013-12-13 14:23:42 -0800 | [diff] [blame] | 1351 | struct ion_buffer *buffer = dmabuf->priv; |
Rebecca Schultz Zavin | 0f34faf | 2013-12-13 14:23:42 -0800 | [diff] [blame] | 1352 | void *vaddr; |
| 1353 | |
| 1354 | if (!buffer->heap->ops->map_kernel) { |
| 1355 | pr_err("%s: map kernel is not implemented by this heap.\n", |
| 1356 | __func__); |
Hridya Valsaraju | 05030f5 | 2021-07-25 20:49:06 -0700 | [diff] [blame] | 1357 | return ERR_PTR(-ENOTTY); |
Rebecca Schultz Zavin | 0f34faf | 2013-12-13 14:23:42 -0800 | [diff] [blame] | 1358 | } |
Rebecca Schultz Zavin | 0f34faf | 2013-12-13 14:23:42 -0800 | [diff] [blame] | 1359 | mutex_lock(&buffer->lock); |
| 1360 | vaddr = ion_buffer_kmap_get(buffer); |
| 1361 | mutex_unlock(&buffer->lock); |
Hridya Valsaraju | 05030f5 | 2021-07-25 20:49:06 -0700 | [diff] [blame] | 1362 | |
| 1363 | if (IS_ERR(vaddr)) |
| 1364 | return vaddr; |
| 1365 | |
| 1366 | return vaddr + offset * PAGE_SIZE; |
| 1367 | } |
| 1368 | |
| 1369 | static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, |
| 1370 | void *ptr) |
| 1371 | { |
| 1372 | struct ion_buffer *buffer = dmabuf->priv; |
| 1373 | |
| 1374 | if (buffer->heap->ops->map_kernel) { |
| 1375 | mutex_lock(&buffer->lock); |
| 1376 | ion_buffer_kmap_put(buffer); |
| 1377 | mutex_unlock(&buffer->lock); |
| 1378 | } |
| 1379 | }
| 1381 | |
| 1382 | static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, |
| 1383 | enum dma_data_direction direction) |
| 1384 | { |
| 1385 | return 0; |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1386 | } |
| 1387 | |
Chris Wilson | 18b862d | 2016-03-18 20:02:39 +0000 | [diff] [blame] | 1388 | static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, |
| 1389 | enum dma_data_direction direction) |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1390 | { |
Chris Wilson | 18b862d | 2016-03-18 20:02:39 +0000 | [diff] [blame] | 1391 | return 0; |
Rebecca Schultz Zavin | 0f34faf | 2013-12-13 14:23:42 -0800 | [diff] [blame] | 1392 | } |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1393 | |
Colin Cross | f63958d | 2013-12-13 19:26:28 -0800 | [diff] [blame] | 1394 | static struct dma_buf_ops dma_buf_ops = { |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1395 | .map_dma_buf = ion_map_dma_buf, |
| 1396 | .unmap_dma_buf = ion_unmap_dma_buf, |
| 1397 | .mmap = ion_mmap, |
| 1398 | .release = ion_dma_buf_release, |
Rebecca Schultz Zavin | 0f34faf | 2013-12-13 14:23:42 -0800 | [diff] [blame] | 1399 | .begin_cpu_access = ion_dma_buf_begin_cpu_access, |
| 1400 | .end_cpu_access = ion_dma_buf_end_cpu_access, |
| 1401 | .kmap_atomic = ion_dma_buf_kmap, |
| 1402 | .kunmap_atomic = ion_dma_buf_kunmap, |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1403 | .kmap = ion_dma_buf_kmap, |
| 1404 | .kunmap = ion_dma_buf_kunmap, |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1405 | }; |
| 1406 | |
Greg Hackmann | 3fedc0c | 2018-08-31 13:06:27 -0700 | [diff] [blame] | 1407 | static struct dma_buf *__ion_share_dma_buf(struct ion_client *client, |
| 1408 | struct ion_handle *handle, |
| 1409 | bool lock_client) |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1410 | { |
Dmitry Kalinkin | 5605b18 | 2015-07-13 15:50:30 +0300 | [diff] [blame] | 1411 | DEFINE_DMA_BUF_EXPORT_INFO(exp_info); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1412 | struct ion_buffer *buffer; |
| 1413 | struct dma_buf *dmabuf; |
| 1414 | bool valid_handle; |
Sumit Semwal | d8fbe34 | 2015-01-23 12:53:43 +0530 | [diff] [blame] | 1415 | |
Greg Hackmann | 3fedc0c | 2018-08-31 13:06:27 -0700 | [diff] [blame] | 1416 | if (lock_client) |
| 1417 | mutex_lock(&client->lock); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1418 | valid_handle = ion_handle_validate(client, handle); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1419 | if (!valid_handle) { |
Olav Haugan | a9bb075 | 2013-12-13 14:23:54 -0800 | [diff] [blame] | 1420 | WARN(1, "%s: invalid handle passed to share.\n", __func__); |
Greg Hackmann | 3fedc0c | 2018-08-31 13:06:27 -0700 | [diff] [blame] | 1421 | if (lock_client) |
| 1422 | mutex_unlock(&client->lock); |
Johan Mossberg | 22ba432 | 2013-12-13 14:24:34 -0800 | [diff] [blame] | 1423 | return ERR_PTR(-EINVAL); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1424 | } |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1425 | buffer = handle->buffer; |
| 1426 | ion_buffer_get(buffer); |
Greg Hackmann | 3fedc0c | 2018-08-31 13:06:27 -0700 | [diff] [blame] | 1427 | if (lock_client) |
| 1428 | mutex_unlock(&client->lock); |
Colin Cross | 83271f6 | 2013-12-13 14:24:59 -0800 | [diff] [blame] | 1429 | |
Sumit Semwal | 72449cb | 2015-02-21 09:00:17 +0530 | [diff] [blame] | 1430 | exp_info.ops = &dma_buf_ops; |
| 1431 | exp_info.size = buffer->size; |
| 1432 | exp_info.flags = O_RDWR; |
| 1433 | exp_info.priv = buffer; |
| 1434 | |
Sumit Semwal | d8fbe34 | 2015-01-23 12:53:43 +0530 | [diff] [blame] | 1435 | dmabuf = dma_buf_export(&exp_info); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1436 | if (IS_ERR(dmabuf)) { |
| 1437 | ion_buffer_put(buffer); |
Johan Mossberg | 22ba432 | 2013-12-13 14:24:34 -0800 | [diff] [blame] | 1438 | return dmabuf; |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1439 | } |
Johan Mossberg | 22ba432 | 2013-12-13 14:24:34 -0800 | [diff] [blame] | 1440 | |
| 1441 | return dmabuf; |
| 1442 | } |
Greg Hackmann | 3fedc0c | 2018-08-31 13:06:27 -0700 | [diff] [blame] | 1443 | |
| 1444 | struct dma_buf *ion_share_dma_buf(struct ion_client *client, |
| 1445 | struct ion_handle *handle) |
| 1446 | { |
| 1447 | return __ion_share_dma_buf(client, handle, true); |
| 1448 | } |
Johan Mossberg | 22ba432 | 2013-12-13 14:24:34 -0800 | [diff] [blame] | 1449 | EXPORT_SYMBOL(ion_share_dma_buf); |
| 1450 | |
Greg Hackmann | 3fedc0c | 2018-08-31 13:06:27 -0700 | [diff] [blame] | 1451 | static int __ion_share_dma_buf_fd(struct ion_client *client, |
| 1452 | struct ion_handle *handle, bool lock_client) |
Johan Mossberg | 22ba432 | 2013-12-13 14:24:34 -0800 | [diff] [blame] | 1453 | { |
| 1454 | struct dma_buf *dmabuf; |
| 1455 | int fd; |
| 1456 | |
Greg Hackmann | 3fedc0c | 2018-08-31 13:06:27 -0700 | [diff] [blame] | 1457 | dmabuf = __ion_share_dma_buf(client, handle, lock_client); |
Johan Mossberg | 22ba432 | 2013-12-13 14:24:34 -0800 | [diff] [blame] | 1458 | if (IS_ERR(dmabuf)) |
| 1459 | return PTR_ERR(dmabuf); |
| 1460 | |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1461 | fd = dma_buf_fd(dmabuf, O_CLOEXEC); |
Laura Abbott | 55808b8 | 2013-12-13 14:23:57 -0800 | [diff] [blame] | 1462 | if (fd < 0) |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1463 | dma_buf_put(dmabuf); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1464 | return fd; |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1465 | } |
Greg Hackmann | 3fedc0c | 2018-08-31 13:06:27 -0700 | [diff] [blame] | 1466 | |
| 1467 | int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) |
| 1468 | { |
| 1469 | return __ion_share_dma_buf_fd(client, handle, true); |
| 1470 | } |
Johan Mossberg | 22ba432 | 2013-12-13 14:24:34 -0800 | [diff] [blame] | 1471 | EXPORT_SYMBOL(ion_share_dma_buf_fd); |
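
/*
 * Hedged userspace sketch of the path that reaches this export via the
 * ION_IOC_SHARE ioctl (field names per the legacy ion_fd_data ABI;
 * "ion_fd" and "handle_id" are assumed to come from a prior
 * ION_IOC_ALLOC):
 *
 *	struct ion_fd_data data = { .handle = handle_id };
 *
 *	if (ioctl(ion_fd, ION_IOC_SHARE, &data) < 0)
 *		err(1, "ION_IOC_SHARE");
 *	(data.fd is now a dma-buf fd, usable with mmap() or other drivers)
 */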
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1472 | |
Greg Hackmann | 3fedc0c | 2018-08-31 13:06:27 -0700 | [diff] [blame] | 1473 | int ion_share_dma_buf_fd_nolock(struct ion_client *client, |
| 1474 | struct ion_handle *handle) |
| 1475 | { |
| 1476 | return __ion_share_dma_buf_fd(client, handle, false); |
| 1477 | } |
| 1478 | |
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 1479 | static struct ion_handle *__ion_import_dma_buf(struct ion_client *client, |
| 1480 | struct dma_buf *dmabuf, bool lock_client) |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1481 | { |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1482 | struct ion_buffer *buffer; |
| 1483 | struct ion_handle *handle; |
Colin Cross | 47b4045 | 2013-12-13 14:24:50 -0800 | [diff] [blame] | 1484 | int ret; |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1485 | |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1486 | /* if this memory came from ion */ |
| 1487 | |
| 1488 | if (dmabuf->ops != &dma_buf_ops) { |
| 1489 | pr_err("%s: cannot import dmabuf from another exporter\n",
| 1490 | __func__); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1491 | return ERR_PTR(-EINVAL); |
| 1492 | } |
| 1493 | buffer = dmabuf->priv; |
| 1494 | |
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 1495 | if (lock_client) |
| 1496 | mutex_lock(&client->lock); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1497 | /* if a handle exists for this buffer just take a reference to it */ |
| 1498 | handle = ion_handle_lookup(client, buffer); |
Colin Cross | 9e90765 | 2013-12-13 14:24:49 -0800 | [diff] [blame] | 1499 | if (!IS_ERR(handle)) { |
Daniel Rosenberg | 20746c1 | 2016-12-05 16:28:28 -0800 | [diff] [blame] | 1500 | handle = ion_handle_get_check_overflow(handle); |
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 1501 | if (lock_client) |
| 1502 | mutex_unlock(&client->lock); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1503 | goto end; |
| 1504 | } |
Colin Cross | 83271f6 | 2013-12-13 14:24:59 -0800 | [diff] [blame] | 1505 | |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1506 | handle = ion_handle_create(client, buffer); |
Shawn Lin | 6fa92e2 | 2015-09-09 15:41:52 +0800 | [diff] [blame] | 1507 | if (IS_ERR(handle)) { |
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 1508 | if (lock_client) |
| 1509 | mutex_unlock(&client->lock); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1510 | goto end; |
Shawn Lin | 6fa92e2 | 2015-09-09 15:41:52 +0800 | [diff] [blame] | 1511 | } |
Colin Cross | 83271f6 | 2013-12-13 14:24:59 -0800 | [diff] [blame] | 1512 | |
Colin Cross | 47b4045 | 2013-12-13 14:24:50 -0800 | [diff] [blame] | 1513 | ret = ion_handle_add(client, handle); |
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 1514 | if (lock_client) |
| 1515 | mutex_unlock(&client->lock); |
Colin Cross | 47b4045 | 2013-12-13 14:24:50 -0800 | [diff] [blame] | 1516 | if (ret) { |
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 1517 | if (lock_client) |
| 1518 | ion_handle_put(handle); |
| 1519 | else |
| 1520 | ion_handle_put_nolock(handle); |
Colin Cross | 47b4045 | 2013-12-13 14:24:50 -0800 | [diff] [blame] | 1521 | handle = ERR_PTR(ret); |
| 1522 | } |
Colin Cross | 83271f6 | 2013-12-13 14:24:59 -0800 | [diff] [blame] | 1523 | |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1524 | end: |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1525 | return handle; |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1526 | } |
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 1527 | |
| 1528 | struct ion_handle *ion_import_dma_buf(struct ion_client *client, |
| 1529 | struct dma_buf *dmabuf) |
| 1530 | { |
| 1531 | return __ion_import_dma_buf(client, dmabuf, true); |
| 1532 | } |
Olav Haugan | ee4c8aa | 2013-12-13 14:23:55 -0800 | [diff] [blame] | 1533 | EXPORT_SYMBOL(ion_import_dma_buf); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1534 | |
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 1535 | static struct ion_handle *__ion_import_dma_buf_fd(struct ion_client *client, |
| 1536 | int fd, bool lock_client) |
Rohit kumar | 9f90381 | 2016-01-12 09:31:46 +0530 | [diff] [blame] | 1537 | { |
| 1538 | struct dma_buf *dmabuf; |
| 1539 | struct ion_handle *handle; |
| 1540 | |
| 1541 | dmabuf = dma_buf_get(fd); |
| 1542 | if (IS_ERR(dmabuf)) |
| 1543 | return ERR_CAST(dmabuf); |
| 1544 | |
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 1545 | handle = __ion_import_dma_buf(client, dmabuf, lock_client); |
Rohit kumar | 9f90381 | 2016-01-12 09:31:46 +0530 | [diff] [blame] | 1546 | dma_buf_put(dmabuf); |
| 1547 | return handle; |
| 1548 | } |
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 1549 | |
| 1550 | struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd) |
| 1551 | { |
| 1552 | return __ion_import_dma_buf_fd(client, fd, true); |
| 1553 | } |
Rohit kumar | 9f90381 | 2016-01-12 09:31:46 +0530 | [diff] [blame] | 1554 | EXPORT_SYMBOL(ion_import_dma_buf_fd); |
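
/*
 * Hedged kernel-side sketch (not from this file): a driver handed a
 * dma-buf fd by userspace can resolve it back to a handle on its own
 * client:
 *
 *	handle = ion_import_dma_buf_fd(client, fd);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 */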
| 1555 | |
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 1556 | struct ion_handle *ion_import_dma_buf_fd_nolock(struct ion_client *client, int fd) |
| 1557 | { |
| 1558 | return __ion_import_dma_buf_fd(client, fd, false); |
| 1559 | } |
| 1560 | |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1561 | static int ion_sync_for_device(struct ion_client *client, int fd) |
Rebecca Schultz Zavin | 0b9ec1c | 2013-12-13 14:23:52 -0800 | [diff] [blame] | 1562 | { |
| 1563 | struct dma_buf *dmabuf; |
| 1564 | struct ion_buffer *buffer; |
| 1565 | |
| 1566 | dmabuf = dma_buf_get(fd); |
Colin Cross | 9e90765 | 2013-12-13 14:24:49 -0800 | [diff] [blame] | 1567 | if (IS_ERR(dmabuf)) |
Rebecca Schultz Zavin | 0b9ec1c | 2013-12-13 14:23:52 -0800 | [diff] [blame] | 1568 | return PTR_ERR(dmabuf); |
| 1569 | |
| 1570 | /* if this memory came from ion */ |
| 1571 | if (dmabuf->ops != &dma_buf_ops) { |
| 1572 | pr_err("%s: cannot sync dmabuf from another exporter\n",
| 1573 | __func__); |
| 1574 | dma_buf_put(dmabuf); |
| 1575 | return -EINVAL; |
| 1576 | } |
| 1577 | buffer = dmabuf->priv; |
Rebecca Schultz Zavin | 856661d | 2013-12-13 14:24:05 -0800 | [diff] [blame] | 1578 | |
Liam Mark | 5326141 | 2017-12-04 10:58:55 -0800 | [diff] [blame] | 1579 | if (!is_buffer_hlos_assigned(buffer)) { |
| 1580 | pr_err("%s: cannot sync a secure dmabuf\n", __func__); |
| 1581 | dma_buf_put(dmabuf); |
| 1582 | return -EINVAL; |
| 1583 | } |
Rebecca Schultz Zavin | 856661d | 2013-12-13 14:24:05 -0800 | [diff] [blame] | 1584 | dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, |
| 1585 | buffer->sg_table->nents, DMA_BIDIRECTIONAL); |
Rebecca Schultz Zavin | 0b9ec1c | 2013-12-13 14:23:52 -0800 | [diff] [blame] | 1586 | dma_buf_put(dmabuf); |
| 1587 | return 0; |
| 1588 | } |
| 1589 | |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1590 | /* fix up the cases where the ioctl direction bits are incorrect */ |
| 1591 | static unsigned int ion_ioctl_dir(unsigned int cmd) |
Laura Abbott | 02b2380 | 2016-09-07 11:49:59 -0700 | [diff] [blame] | 1592 | { |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1593 | switch (cmd) { |
| 1594 | case ION_IOC_SYNC: |
| 1595 | case ION_IOC_FREE: |
| 1596 | case ION_IOC_CUSTOM: |
| 1597 | return _IOC_WRITE; |
| 1598 | default: |
| 1599 | return _IOC_DIR(cmd); |
| 1600 | } |
| 1601 | } |
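
/*
 * Example of the mismatch fixed up above (ioctl definitions quoted from
 * the legacy uapi header for illustration): ION_IOC_FREE, ION_IOC_SYNC
 * and ION_IOC_CUSTOM are declared _IOWR(), so _IOC_DIR() reports
 * _IOC_READ | _IOC_WRITE even though the kernel only consumes their
 * arguments.  Forcing _IOC_WRITE lets ion_ioctl() below skip the
 * needless copy_to_user() on the way out.
 */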
| 1602 | |
| 1603 | static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
| 1604 | { |
| 1605 | struct ion_client *client = filp->private_data; |
Laura Abbott | 02b2380 | 2016-09-07 11:49:59 -0700 | [diff] [blame] | 1606 | struct ion_device *dev = client->dev; |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1607 | struct ion_handle *cleanup_handle = NULL; |
| 1608 | int ret = 0; |
| 1609 | unsigned int dir; |
Laura Abbott | 02b2380 | 2016-09-07 11:49:59 -0700 | [diff] [blame] | 1610 | |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1611 | union { |
| 1612 | struct ion_fd_data fd; |
| 1613 | struct ion_allocation_data allocation; |
| 1614 | struct ion_handle_data handle; |
| 1615 | struct ion_custom_data custom; |
| 1616 | } data; |
Laura Abbott | 02b2380 | 2016-09-07 11:49:59 -0700 | [diff] [blame] | 1617 | |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1618 | dir = ion_ioctl_dir(cmd); |
| 1619 | |
| 1620 | if (_IOC_SIZE(cmd) > sizeof(data)) |
| 1621 | return -EINVAL; |
| 1622 | |
| 1623 | if (dir & _IOC_WRITE) |
| 1624 | if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) |
| 1625 | return -EFAULT; |
| 1626 | |
| 1627 | switch (cmd) { |
| 1628 | case ION_IOC_ALLOC: |
| 1629 | { |
| 1630 | struct ion_handle *handle; |
| 1631 | |
Daniel Rosenberg | c30d45a | 2016-11-02 17:43:51 -0700 | [diff] [blame] | 1632 | handle = __ion_alloc(client, data.allocation.len, |
| 1633 | data.allocation.align, |
| 1634 | data.allocation.heap_id_mask, |
| 1635 | data.allocation.flags, true); |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1636 | if (IS_ERR(handle)) |
| 1637 | return PTR_ERR(handle); |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1638 | data.allocation.handle = handle->id; |
| 1639 | |
| 1640 | cleanup_handle = handle; |
Lee Jones | 1143950 | 2022-01-25 14:18:08 +0000 | [diff] [blame^] | 1641 | pass_to_user(handle); |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1642 | break; |
| 1643 | } |
| 1644 | case ION_IOC_FREE: |
| 1645 | { |
| 1646 | struct ion_handle *handle; |
| 1647 | |
| 1648 | mutex_lock(&client->lock); |
| 1649 | handle = ion_handle_get_by_id_nolock(client, |
| 1650 | data.handle.handle); |
| 1651 | if (IS_ERR(handle)) { |
| 1652 | mutex_unlock(&client->lock); |
| 1653 | return PTR_ERR(handle); |
| 1654 | } |
Daniel Rosenberg | 8531a79 | 2017-02-03 20:37:06 -0800 | [diff] [blame] | 1655 | user_ion_free_nolock(client, handle); |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1656 | ion_handle_put_nolock(handle); |
| 1657 | mutex_unlock(&client->lock); |
| 1658 | break; |
| 1659 | } |
| 1660 | case ION_IOC_SHARE: |
| 1661 | case ION_IOC_MAP: |
| 1662 | { |
| 1663 | struct ion_handle *handle; |
| 1664 | |
Swetha Chikkaboraiah | 6186c32 | 2020-08-05 16:51:54 +0530 | [diff] [blame] | 1665 | mutex_lock(&client->lock); |
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 1666 | handle = ion_handle_get_by_id_nolock(client, data.handle.handle); |
Swetha Chikkaboraiah | 6186c32 | 2020-08-05 16:51:54 +0530 | [diff] [blame] | 1667 | if (IS_ERR(handle)) { |
| 1668 | mutex_unlock(&client->lock); |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1669 | return PTR_ERR(handle); |
Swetha Chikkaboraiah | 6186c32 | 2020-08-05 16:51:54 +0530 | [diff] [blame] | 1670 | } |
| 1671 | data.fd.fd = ion_share_dma_buf_fd_nolock(client, handle); |
| 1672 | ion_handle_put_nolock(handle); |
| 1673 | mutex_unlock(&client->lock); |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1674 | if (data.fd.fd < 0) |
| 1675 | ret = data.fd.fd; |
| 1676 | break; |
| 1677 | } |
| 1678 | case ION_IOC_IMPORT: |
| 1679 | { |
| 1680 | struct ion_handle *handle; |
| 1681 | |
| 1682 | handle = ion_import_dma_buf_fd(client, data.fd.fd); |
Daniel Rosenberg | 8531a79 | 2017-02-03 20:37:06 -0800 | [diff] [blame] | 1683 | if (IS_ERR(handle)) { |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1684 | ret = PTR_ERR(handle); |
Daniel Rosenberg | 8531a79 | 2017-02-03 20:37:06 -0800 | [diff] [blame] | 1685 | } else { |
Lee Jones | 1143950 | 2022-01-25 14:18:08 +0000 | [diff] [blame^] | 1686 | data.handle.handle = handle->id; |
Daniel Rosenberg | 8531a79 | 2017-02-03 20:37:06 -0800 | [diff] [blame] | 1687 | handle = pass_to_user(handle); |
Lee Jones | 1143950 | 2022-01-25 14:18:08 +0000 | [diff] [blame^] | 1688 | if (IS_ERR(handle)) { |
Daniel Rosenberg | 8531a79 | 2017-02-03 20:37:06 -0800 | [diff] [blame] | 1689 | ret = PTR_ERR(handle); |
Lee Jones | 1143950 | 2022-01-25 14:18:08 +0000 | [diff] [blame^] | 1690 | data.handle.handle = 0; |
| 1691 | } |
Daniel Rosenberg | 8531a79 | 2017-02-03 20:37:06 -0800 | [diff] [blame] | 1692 | } |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1693 | break; |
| 1694 | } |
| 1695 | case ION_IOC_SYNC: |
| 1696 | { |
| 1697 | ret = ion_sync_for_device(client, data.fd.fd); |
| 1698 | break; |
| 1699 | } |
| 1700 | case ION_IOC_CUSTOM: |
| 1701 | { |
| 1702 | if (!dev->custom_ioctl) |
| 1703 | return -ENOTTY; |
| 1704 | ret = dev->custom_ioctl(client, data.custom.cmd, |
| 1705 | data.custom.arg); |
| 1706 | break; |
| 1707 | } |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 1708 | case ION_IOC_CLEAN_CACHES: |
| 1709 | return client->dev->custom_ioctl(client, |
| 1710 | ION_IOC_CLEAN_CACHES, arg); |
| 1711 | case ION_IOC_INV_CACHES: |
| 1712 | return client->dev->custom_ioctl(client, |
| 1713 | ION_IOC_INV_CACHES, arg); |
| 1714 | case ION_IOC_CLEAN_INV_CACHES: |
| 1715 | return client->dev->custom_ioctl(client, |
| 1716 | ION_IOC_CLEAN_INV_CACHES, arg); |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1717 | default: |
| 1718 | return -ENOTTY; |
Laura Abbott | 02b2380 | 2016-09-07 11:49:59 -0700 | [diff] [blame] | 1719 | } |
| 1720 | |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1721 | if (dir & _IOC_READ) { |
| 1722 | if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) { |
Daniel Rosenberg | c30d45a | 2016-11-02 17:43:51 -0700 | [diff] [blame] | 1723 | if (cleanup_handle) { |
Daniel Rosenberg | 8531a79 | 2017-02-03 20:37:06 -0800 | [diff] [blame] | 1724 | mutex_lock(&client->lock); |
| 1725 | user_ion_free_nolock(client, cleanup_handle); |
| 1726 | ion_handle_put_nolock(cleanup_handle); |
| 1727 | mutex_unlock(&client->lock); |
Daniel Rosenberg | c30d45a | 2016-11-02 17:43:51 -0700 | [diff] [blame] | 1728 | } |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1729 | return -EFAULT; |
| 1730 | } |
Laura Abbott | 02b2380 | 2016-09-07 11:49:59 -0700 | [diff] [blame] | 1731 | } |
Daniel Rosenberg | c30d45a | 2016-11-02 17:43:51 -0700 | [diff] [blame] | 1732 | if (cleanup_handle) |
| 1733 | ion_handle_put(cleanup_handle); |
Laura Abbott | 02b2380 | 2016-09-07 11:49:59 -0700 | [diff] [blame] | 1734 | return ret; |
| 1735 | } |
| 1736 | |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1737 | static int ion_release(struct inode *inode, struct file *file) |
| 1738 | { |
| 1739 | struct ion_client *client = file->private_data; |
| 1740 | |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1741 | ion_client_destroy(client); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1742 | return 0; |
| 1743 | } |
| 1744 | |
| 1745 | static int ion_open(struct inode *inode, struct file *file) |
| 1746 | { |
| 1747 | struct miscdevice *miscdev = file->private_data; |
| 1748 | struct ion_device *dev = container_of(miscdev, struct ion_device, dev); |
| 1749 | struct ion_client *client; |
Laura Abbott | 483ed03 | 2014-02-17 13:58:35 -0800 | [diff] [blame] | 1750 | char debug_name[64]; |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1751 | |
Laura Abbott | 483ed03 | 2014-02-17 13:58:35 -0800 | [diff] [blame] | 1752 | snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader)); |
| 1753 | client = ion_client_create(dev, debug_name); |
Colin Cross | 9e90765 | 2013-12-13 14:24:49 -0800 | [diff] [blame] | 1754 | if (IS_ERR(client)) |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1755 | return PTR_ERR(client); |
| 1756 | file->private_data = client; |
| 1757 | |
| 1758 | return 0; |
| 1759 | } |
| 1760 | |
| 1761 | static const struct file_operations ion_fops = { |
| 1762 | .owner = THIS_MODULE, |
| 1763 | .open = ion_open, |
| 1764 | .release = ion_release, |
| 1765 | .unlocked_ioctl = ion_ioctl, |
Rom Lemarchand | 827c849 | 2013-12-13 14:24:55 -0800 | [diff] [blame] | 1766 | .compat_ioctl = compat_ion_ioctl, |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1767 | }; |
| 1768 | |
| 1769 | static size_t ion_debug_heap_total(struct ion_client *client, |
Rebecca Schultz Zavin | 2bb9f50 | 2013-12-13 14:24:30 -0800 | [diff] [blame] | 1770 | unsigned int id) |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1771 | { |
| 1772 | size_t size = 0; |
| 1773 | struct rb_node *n; |
| 1774 | |
| 1775 | mutex_lock(&client->lock); |
| 1776 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { |
| 1777 | struct ion_handle *handle = rb_entry(n, |
| 1778 | struct ion_handle, |
| 1779 | node); |
Rebecca Schultz Zavin | 2bb9f50 | 2013-12-13 14:24:30 -0800 | [diff] [blame] | 1780 | if (handle->buffer->heap->id == id) |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1781 | size += handle->buffer->size; |
| 1782 | } |
| 1783 | mutex_unlock(&client->lock); |
| 1784 | return size; |
| 1785 | } |
| 1786 | |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 1787 | /** |
| 1788 | * ion_debug_mem_map_create - create a mem_map of the heap
| 1789 | * @s: seq_file to log error messages to
| 1790 | * @heap: the heap to create the mem_map for
| 1791 | * @mem_map: the list to populate with mem_map_data entries
| 1792 | */ |
| 1793 | void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap, |
| 1794 | struct list_head *mem_map) |
| 1795 | { |
| 1796 | struct ion_device *dev = heap->dev; |
| 1797 | struct rb_node *cnode; |
| 1798 | size_t size; |
| 1799 | struct ion_client *client; |
| 1800 | |
| 1801 | if (!heap->ops->phys) |
| 1802 | return; |
| 1803 | |
| 1804 | down_read(&dev->lock); |
| 1805 | for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) { |
| 1806 | struct rb_node *hnode; |
| 1807 | |
| 1808 | client = rb_entry(cnode, struct ion_client, node); |
| 1809 | |
| 1810 | mutex_lock(&client->lock); |
| 1811 | for (hnode = rb_first(&client->handles); |
| 1812 | hnode; |
| 1813 | hnode = rb_next(hnode)) { |
| 1814 | struct ion_handle *handle = rb_entry( |
| 1815 | hnode, struct ion_handle, node); |
| 1816 | if (handle->buffer->heap == heap) { |
| 1817 | struct mem_map_data *data = |
| 1818 | kzalloc(sizeof(*data), GFP_KERNEL); |
| 1819 | if (!data) |
| 1820 | goto inner_error; |
| 1821 | heap->ops->phys(heap, handle->buffer, |
| 1822 | &data->addr, &size); |
| 1823 | data->size = (unsigned long)size; |
| 1824 | data->addr_end = data->addr + data->size - 1; |
| 1825 | data->client_name = kstrdup(client->name, |
| 1826 | GFP_KERNEL); |
| 1827 | if (!data->client_name) { |
| 1828 | kfree(data); |
| 1829 | goto inner_error; |
| 1830 | } |
| 1831 | list_add(&data->node, mem_map); |
| 1832 | } |
| 1833 | } |
| 1834 | mutex_unlock(&client->lock); |
| 1835 | } |
| 1836 | up_read(&dev->lock); |
| 1837 | return; |
| 1838 | |
| 1839 | inner_error: |
| 1840 | seq_puts(s, |
| 1841 | "ERROR: out of memory. Part of memory map will not be logged\n"); |
| 1842 | mutex_unlock(&client->lock); |
| 1843 | up_read(&dev->lock); |
| 1844 | } |
| 1845 | |
| 1846 | /** |
| 1847 | * ion_debug_mem_map_destroy - free the memory allocated by ion_debug_mem_map_create
| 1848 | * @mem_map: the mem map to free
| 1849 | */ |
| 1850 | static void ion_debug_mem_map_destroy(struct list_head *mem_map) |
| 1851 | { |
| 1852 | if (mem_map) { |
| 1853 | struct mem_map_data *data, *tmp; |
| 1854 | |
| 1855 | list_for_each_entry_safe(data, tmp, mem_map, node) { |
| 1856 | list_del(&data->node); |
| 1857 | kfree(data->client_name); |
| 1858 | kfree(data); |
| 1859 | } |
| 1860 | } |
| 1861 | } |
| 1862 | |
| 1863 | static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b) |
| 1864 | { |
| 1865 | struct mem_map_data *d1, *d2; |
| 1866 | |
| 1867 | d1 = list_entry(a, struct mem_map_data, node); |
| 1868 | d2 = list_entry(b, struct mem_map_data, node); |
| 1869 | if (d1->addr == d2->addr)
| 1870 | return d1->size < d2->size ? -1 : (d1->size > d2->size);
| 1871 | return d1->addr < d2->addr ? -1 : (d1->addr > d2->addr);
| 1872 | } |
| 1873 | |
| 1874 | /** |
| 1875 | * ion_heap_print_debug - print heap debug information
| 1876 | * @s: seq_file to log messages to
| 1877 | * @heap: the heap to print debug information for
| 1878 | */ |
| 1879 | static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap) |
| 1880 | { |
| 1881 | if (heap->ops->print_debug) { |
| 1882 | struct list_head mem_map = LIST_HEAD_INIT(mem_map); |
| 1883 | |
| 1884 | ion_debug_mem_map_create(s, heap, &mem_map); |
| 1885 | list_sort(NULL, &mem_map, mem_map_cmp); |
| 1886 | heap->ops->print_debug(heap, s, &mem_map); |
| 1887 | ion_debug_mem_map_destroy(&mem_map); |
| 1888 | } |
| 1889 | } |
| 1890 | |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1891 | static int ion_debug_heap_show(struct seq_file *s, void *unused) |
| 1892 | { |
| 1893 | struct ion_heap *heap = s->private; |
| 1894 | struct ion_device *dev = heap->dev; |
| 1895 | struct rb_node *n; |
Rebecca Schultz Zavin | 5ad7bc3 | 2013-12-13 14:24:03 -0800 | [diff] [blame] | 1896 | size_t total_size = 0; |
| 1897 | size_t total_orphaned_size = 0; |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1898 | |
Rasmus Villemoes | b569396 | 2015-02-20 14:13:19 +0100 | [diff] [blame] | 1899 | seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size"); |
Iulia Manda | 164ad86 | 2014-03-11 20:12:29 +0200 | [diff] [blame] | 1900 | seq_puts(s, "----------------------------------------------------\n"); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1901 | |
Patrick Daly | 60f0d9a | 2017-06-30 17:16:21 -0700 | [diff] [blame] | 1902 | down_read(&dev->lock); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1903 | for (n = rb_first(&dev->clients); n; n = rb_next(n)) { |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1904 | struct ion_client *client = rb_entry(n, struct ion_client, |
| 1905 | node); |
Rebecca Schultz Zavin | 2bb9f50 | 2013-12-13 14:24:30 -0800 | [diff] [blame] | 1906 | size_t size = ion_debug_heap_total(client, heap->id); |
Seunghun Lee | 10f6286 | 2014-05-01 01:30:23 +0900 | [diff] [blame] | 1907 | |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1908 | if (!size) |
| 1909 | continue; |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1910 | if (client->task) { |
| 1911 | char task_comm[TASK_COMM_LEN]; |
| 1912 | |
| 1913 | get_task_comm(task_comm, client->task); |
Rasmus Villemoes | b569396 | 2015-02-20 14:13:19 +0100 | [diff] [blame] | 1914 | seq_printf(s, "%16s %16u %16zu\n", task_comm, |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1915 | client->pid, size); |
| 1916 | } else { |
Rasmus Villemoes | b569396 | 2015-02-20 14:13:19 +0100 | [diff] [blame] | 1917 | seq_printf(s, "%16s %16u %16zu\n", client->name, |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 1918 | client->pid, size); |
| 1919 | } |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1920 | } |
Patrick Daly | 60f0d9a | 2017-06-30 17:16:21 -0700 | [diff] [blame] | 1921 | up_read(&dev->lock); |
Neil Zhang | 948c4db | 2016-01-26 17:39:06 +0800 | [diff] [blame] | 1922 | |
Iulia Manda | 164ad86 | 2014-03-11 20:12:29 +0200 | [diff] [blame] | 1923 | seq_puts(s, "----------------------------------------------------\n"); |
| 1924 | seq_puts(s, "orphaned allocations (info is from last known client):\n"); |
Rebecca Schultz Zavin | 8d7ab9a | 2013-12-13 14:24:16 -0800 | [diff] [blame] | 1925 | mutex_lock(&dev->buffer_lock); |
Rebecca Schultz Zavin | 5ad7bc3 | 2013-12-13 14:24:03 -0800 | [diff] [blame] | 1926 | for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { |
| 1927 | struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, |
| 1928 | node); |
Rebecca Schultz Zavin | 2bb9f50 | 2013-12-13 14:24:30 -0800 | [diff] [blame] | 1929 | if (buffer->heap->id != heap->id) |
Rebecca Schultz Zavin | 45b17a8 | 2013-12-13 14:24:11 -0800 | [diff] [blame] | 1930 | continue; |
| 1931 | total_size += buffer->size; |
Rebecca Schultz Zavin | 5ad7bc3 | 2013-12-13 14:24:03 -0800 | [diff] [blame] | 1932 | if (!buffer->handle_count) { |
Rasmus Villemoes | b569396 | 2015-02-20 14:13:19 +0100 | [diff] [blame] | 1933 | seq_printf(s, "%16s %16u %16zu %d %d\n", |
Colin Cross | e61fc91 | 2013-12-13 19:26:14 -0800 | [diff] [blame] | 1934 | buffer->task_comm, buffer->pid, |
| 1935 | buffer->size, buffer->kmap_cnt, |
Benjamin Gaignard | 092c354 | 2013-12-13 14:24:22 -0800 | [diff] [blame] | 1936 | atomic_read(&buffer->ref.refcount)); |
Rebecca Schultz Zavin | 5ad7bc3 | 2013-12-13 14:24:03 -0800 | [diff] [blame] | 1937 | total_orphaned_size += buffer->size; |
| 1938 | } |
| 1939 | } |
Rebecca Schultz Zavin | 8d7ab9a | 2013-12-13 14:24:16 -0800 | [diff] [blame] | 1940 | mutex_unlock(&dev->buffer_lock); |
Iulia Manda | 164ad86 | 2014-03-11 20:12:29 +0200 | [diff] [blame] | 1941 | seq_puts(s, "----------------------------------------------------\n"); |
Rasmus Villemoes | b569396 | 2015-02-20 14:13:19 +0100 | [diff] [blame] | 1942 | seq_printf(s, "%16s %16zu\n", "total orphaned", |
Rebecca Schultz Zavin | 5ad7bc3 | 2013-12-13 14:24:03 -0800 | [diff] [blame] | 1943 | total_orphaned_size); |
Rasmus Villemoes | b569396 | 2015-02-20 14:13:19 +0100 | [diff] [blame] | 1944 | seq_printf(s, "%16s %16zu\n", "total ", total_size); |
Colin Cross | 2540c73 | 2013-12-13 14:24:47 -0800 | [diff] [blame] | 1945 | if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) |
Rasmus Villemoes | b569396 | 2015-02-20 14:13:19 +0100 | [diff] [blame] | 1946 | seq_printf(s, "%16s %16zu\n", "deferred free", |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 1947 | heap->free_list_size); |
Iulia Manda | 164ad86 | 2014-03-11 20:12:29 +0200 | [diff] [blame] | 1948 | seq_puts(s, "----------------------------------------------------\n"); |
Rebecca Schultz Zavin | 45b17a8 | 2013-12-13 14:24:11 -0800 | [diff] [blame] | 1949 | |
| 1950 | if (heap->debug_show) |
| 1951 | heap->debug_show(heap, s, unused); |
Rebecca Schultz Zavin | 5ad7bc3 | 2013-12-13 14:24:03 -0800 | [diff] [blame] | 1952 | |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 1953 | ion_heap_print_debug(s, heap); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 1954 | return 0; |
| 1955 | } |
| 1956 | |
| 1957 | static int ion_debug_heap_open(struct inode *inode, struct file *file) |
| 1958 | { |
| 1959 | return single_open(file, ion_debug_heap_show, inode->i_private); |
| 1960 | } |
| 1961 | |
| 1962 | static const struct file_operations debug_heap_fops = { |
| 1963 | .open = ion_debug_heap_open, |
| 1964 | .read = seq_read, |
| 1965 | .llseek = seq_lseek, |
| 1966 | .release = single_release, |
| 1967 | }; |
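/*
 * Annotation (not original source): debug_heap_fops exposes
 * ion_debug_heap_show() above through the per-heap debugfs file created
 * in ion_device_add_heap() below, so the per-client and orphaned-buffer
 * statistics can be read from userspace, e.g.:
 *
 *	cat /sys/kernel/debug/ion/heaps/<heap-name>
 *
 * (path assumes the default "ion" and "heaps" debugfs directories
 * created in ion_device_create() later in this file).
 */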
| 1968 | |
Laura Abbott | 29defcc | 2014-08-01 16:13:40 -0700 | [diff] [blame] | 1969 | void show_ion_usage(struct ion_device *dev) |
| 1970 | { |
| 1971 | struct ion_heap *heap; |
| 1972 | |
| 1973 | if (!down_read_trylock(&dev->lock)) { |
| 1974 | pr_err("Ion output would deadlock, can't print debug information\n"); |
| 1975 | return; |
| 1976 | } |
| 1977 | |
| 1978 | 	pr_info("%16s %16s %16s\n", "Heap name", "Total heap size",
| 1979 | "Total orphaned size"); |
| 1980 | 	pr_info("----------------------------------------------------\n");
| 1981 | plist_for_each_entry(heap, &dev->heaps, node) { |
Patrick Daly | e464006 | 2017-08-01 19:56:52 -0700 | [diff] [blame] | 1982 | 		pr_info("%16s 0x%16lx 0x%16lx\n",
| 1983 | heap->name, atomic_long_read(&heap->total_allocated), |
| 1984 | atomic_long_read(&heap->total_allocated) - |
| 1985 | atomic_long_read(&heap->total_handles)); |
Laura Abbott | 29defcc | 2014-08-01 16:13:40 -0700 | [diff] [blame] | 1986 | if (heap->debug_show) |
| 1987 | heap->debug_show(heap, NULL, 0); |
| 1988 | } |
| 1989 | up_read(&dev->lock); |
| 1990 | } |
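/*
 * Annotation: show_ion_usage() uses down_read_trylock() rather than
 * down_read() so that it can be called from debugging or low-memory
 * paths where dev->lock may already be held; bailing out with an error
 * message is preferred over deadlocking. (The exact callers are outside
 * this file; "low-memory paths" is an assumption.)
 */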
| 1991 | |
Rebecca Schultz Zavin | ea313b5 | 2013-12-13 14:24:39 -0800 | [diff] [blame] | 1992 | static int debug_shrink_set(void *data, u64 val) |
Rebecca Schultz Zavin | fe2faea | 2013-12-13 14:24:35 -0800 | [diff] [blame] | 1993 | { |
John Stultz | e1d855b | 2013-12-13 19:26:33 -0800 | [diff] [blame] | 1994 | struct ion_heap *heap = data; |
| 1995 | struct shrink_control sc; |
| 1996 | int objs; |
Rebecca Schultz Zavin | fe2faea | 2013-12-13 14:24:35 -0800 | [diff] [blame] | 1997 | |
Derek Yerger | 3b0ae7b | 2016-03-11 17:31:18 -0500 | [diff] [blame] | 1998 | sc.gfp_mask = GFP_HIGHUSER; |
Gioh Kim | aeb7fa7 | 2015-07-06 15:14:41 +0900 | [diff] [blame] | 1999 | sc.nr_to_scan = val; |
Rebecca Schultz Zavin | fe2faea | 2013-12-13 14:24:35 -0800 | [diff] [blame] | 2000 | |
Gioh Kim | aeb7fa7 | 2015-07-06 15:14:41 +0900 | [diff] [blame] | 2001 | if (!val) { |
| 2002 | objs = heap->shrinker.count_objects(&heap->shrinker, &sc); |
| 2003 | sc.nr_to_scan = objs; |
| 2004 | } |
Rebecca Schultz Zavin | ea313b5 | 2013-12-13 14:24:39 -0800 | [diff] [blame] | 2005 | |
Gioh Kim | aeb7fa7 | 2015-07-06 15:14:41 +0900 | [diff] [blame] | 2006 | heap->shrinker.scan_objects(&heap->shrinker, &sc); |
John Stultz | e1d855b | 2013-12-13 19:26:33 -0800 | [diff] [blame] | 2007 | return 0; |
Rebecca Schultz Zavin | fe2faea | 2013-12-13 14:24:35 -0800 | [diff] [blame] | 2008 | } |
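/*
 * Annotation: writing to the <heap>_shrink debugfs file created below
 * asks the heap's shrinker to free objects. A non-zero value scans up to
 * that many objects; writing 0 first counts the freeable objects and
 * then scans them all, draining the heap's cached memory.
 */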
| 2009 | |
Rebecca Schultz Zavin | ea313b5 | 2013-12-13 14:24:39 -0800 | [diff] [blame] | 2010 | static int debug_shrink_get(void *data, u64 *val) |
Rebecca Schultz Zavin | fe2faea | 2013-12-13 14:24:35 -0800 | [diff] [blame] | 2011 | { |
John Stultz | e1d855b | 2013-12-13 19:26:33 -0800 | [diff] [blame] | 2012 | struct ion_heap *heap = data; |
| 2013 | struct shrink_control sc; |
| 2014 | int objs; |
Rebecca Schultz Zavin | fe2faea | 2013-12-13 14:24:35 -0800 | [diff] [blame] | 2015 | |
Derek Yerger | 3b0ae7b | 2016-03-11 17:31:18 -0500 | [diff] [blame] | 2016 | sc.gfp_mask = GFP_HIGHUSER; |
John Stultz | e1d855b | 2013-12-13 19:26:33 -0800 | [diff] [blame] | 2017 | sc.nr_to_scan = 0; |
Rebecca Schultz Zavin | fe2faea | 2013-12-13 14:24:35 -0800 | [diff] [blame] | 2018 | |
Gioh Kim | aeb7fa7 | 2015-07-06 15:14:41 +0900 | [diff] [blame] | 2019 | objs = heap->shrinker.count_objects(&heap->shrinker, &sc); |
John Stultz | e1d855b | 2013-12-13 19:26:33 -0800 | [diff] [blame] | 2020 | *val = objs; |
| 2021 | return 0; |
Rebecca Schultz Zavin | fe2faea | 2013-12-13 14:24:35 -0800 | [diff] [blame] | 2022 | } |
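/*
 * Annotation: reading the <heap>_shrink file reports the number of
 * objects the heap's shrinker currently considers freeable (typically
 * cached memory such as deferred-free buffers or page-pool pages; the
 * exact accounting lives in the heap's count_objects() implementation).
 */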
| 2023 | |
Rebecca Schultz Zavin | ea313b5 | 2013-12-13 14:24:39 -0800 | [diff] [blame] | 2024 | DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, |
John Stultz | e1d855b | 2013-12-13 19:26:33 -0800 | [diff] [blame] | 2025 | debug_shrink_set, "%llu\n"); |
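/*
 * Annotation: DEFINE_SIMPLE_ATTRIBUTE() above generates the file
 * operations tying debug_shrink_get()/debug_shrink_set() to a debugfs
 * file. A simplified sketch of what the macro (from <linux/fs.h>)
 * expands to:
 *
 *	static int debug_shrink_fops_open(struct inode *inode,
 *					  struct file *file)
 *	{
 *		return simple_attr_open(inode, file, debug_shrink_get,
 *					debug_shrink_set, "%llu\n");
 *	}
 *	static const struct file_operations debug_shrink_fops = {
 *		.owner	 = THIS_MODULE,
 *		.open	 = debug_shrink_fops_open,
 *		.release = simple_attr_release,
 *		.read	 = simple_attr_read,
 *		.write	 = simple_attr_write,
 *		.llseek	 = generic_file_llseek,
 *	};
 */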
Rebecca Schultz Zavin | fe2faea | 2013-12-13 14:24:35 -0800 | [diff] [blame] | 2026 | |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 2027 | void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) |
| 2028 | { |
Mitchel Humpherys | b08585fb | 2014-02-17 13:58:34 -0800 | [diff] [blame] | 2029 | struct dentry *debug_file; |
| 2030 | |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 2031 | if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma || |
| 2032 | !heap->ops->unmap_dma) |
Rebecca Schultz Zavin | 29ae6bc | 2013-12-13 14:23:43 -0800 | [diff] [blame] | 2033 | 		pr_err("%s: cannot add heap with invalid ops struct.\n",
| 2034 | __func__); |
| 2035 | |
Mitchel Humpherys | 95e53dd | 2015-01-08 17:24:27 -0800 | [diff] [blame] | 2036 | spin_lock_init(&heap->free_lock); |
| 2037 | heap->free_list_size = 0; |
| 2038 | |
Rebecca Schultz Zavin | ea313b5 | 2013-12-13 14:24:39 -0800 | [diff] [blame] | 2039 | if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) |
| 2040 | ion_heap_init_deferred_free(heap); |
Rebecca Schultz Zavin | fe2faea | 2013-12-13 14:24:35 -0800 | [diff] [blame] | 2041 | |
Colin Cross | b9daf0b | 2014-02-17 13:58:38 -0800 | [diff] [blame] | 2042 | if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) |
| 2043 | ion_heap_init_shrinker(heap); |
| 2044 | |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 2045 | heap->dev = dev; |
Rebecca Schultz Zavin | 8d7ab9a | 2013-12-13 14:24:16 -0800 | [diff] [blame] | 2046 | down_write(&dev->lock); |
Sriram Raghunathan | 7e41617 | 2015-09-22 22:35:51 +0530 | [diff] [blame] | 2047 | 	/*
| 2048 | 	 * Use negative heap->id as the plist priority: when the list is
| 2049 | 	 * traversed later, heaps with higher id numbers are attempted first.
| 2050 | 	 */
Rebecca Schultz Zavin | cd69488 | 2013-12-13 14:24:25 -0800 | [diff] [blame] | 2051 | plist_node_init(&heap->node, -heap->id); |
| 2052 | plist_add(&heap->node, &dev->heaps); |
Mitchel Humpherys | b08585fb | 2014-02-17 13:58:34 -0800 | [diff] [blame] | 2053 | debug_file = debugfs_create_file(heap->name, 0664, |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 2054 | dev->heaps_debug_root, heap, |
| 2055 | &debug_heap_fops); |
Mitchel Humpherys | b08585fb | 2014-02-17 13:58:34 -0800 | [diff] [blame] | 2056 | |
| 2057 | if (!debug_file) { |
| 2058 | char buf[256], *path; |
Seunghun Lee | 10f6286 | 2014-05-01 01:30:23 +0900 | [diff] [blame] | 2059 | |
Mitchel Humpherys | b08585fb | 2014-02-17 13:58:34 -0800 | [diff] [blame] | 2060 | path = dentry_path(dev->heaps_debug_root, buf, 256); |
| 2061 | pr_err("Failed to create heap debugfs at %s/%s\n", |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 2062 | path, heap->name); |
Mitchel Humpherys | b08585fb | 2014-02-17 13:58:34 -0800 | [diff] [blame] | 2063 | } |
| 2064 | |
Gioh Kim | aeb7fa7 | 2015-07-06 15:14:41 +0900 | [diff] [blame] | 2065 | if (heap->shrinker.count_objects && heap->shrinker.scan_objects) { |
Rebecca Schultz Zavin | ea313b5 | 2013-12-13 14:24:39 -0800 | [diff] [blame] | 2066 | char debug_name[64]; |
| 2067 | |
| 2068 | snprintf(debug_name, 64, "%s_shrink", heap->name); |
Mitchel Humpherys | b08585fb | 2014-02-17 13:58:34 -0800 | [diff] [blame] | 2069 | debug_file = debugfs_create_file( |
| 2070 | debug_name, 0644, dev->heaps_debug_root, heap, |
| 2071 | &debug_shrink_fops); |
| 2072 | if (!debug_file) { |
| 2073 | char buf[256], *path; |
Seunghun Lee | 10f6286 | 2014-05-01 01:30:23 +0900 | [diff] [blame] | 2074 | |
Mitchel Humpherys | b08585fb | 2014-02-17 13:58:34 -0800 | [diff] [blame] | 2075 | path = dentry_path(dev->heaps_debug_root, buf, 256); |
| 2076 | pr_err("Failed to create heap shrinker debugfs at %s/%s\n", |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 2077 | path, debug_name); |
Mitchel Humpherys | b08585fb | 2014-02-17 13:58:34 -0800 | [diff] [blame] | 2078 | } |
Rebecca Schultz Zavin | ea313b5 | 2013-12-13 14:24:39 -0800 | [diff] [blame] | 2079 | } |
Gioh Kim | aeb7fa7 | 2015-07-06 15:14:41 +0900 | [diff] [blame] | 2080 | |
Rebecca Schultz Zavin | 8d7ab9a | 2013-12-13 14:24:16 -0800 | [diff] [blame] | 2081 | up_write(&dev->lock); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 2082 | } |
Paul Gortmaker | 8c6c463 | 2015-10-13 16:46:53 -0400 | [diff] [blame] | 2083 | EXPORT_SYMBOL(ion_device_add_heap); |
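/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * building a heap from one ion_platform_heap description and registering
 * it with an ION device, assuming the ion_heap_create() helper from
 * ion_heap.c. Error handling is abbreviated.
 */
static int __maybe_unused example_register_heap(struct ion_device *idev,
						struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	/* ion_heap_create() also fills in heap->name and heap->id */
	heap = ion_heap_create(pheap);
	if (IS_ERR(heap))
		return PTR_ERR(heap);

	ion_device_add_heap(idev, heap);
	return 0;
}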
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 2084 | |
Laura Abbott | 29defcc | 2014-08-01 16:13:40 -0700 | [diff] [blame] | 2085 | int ion_walk_heaps(struct ion_client *client, int heap_id, |
| 2086 | enum ion_heap_type type, void *data, |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 2087 | int (*f)(struct ion_heap *heap, void *data)) |
| 2088 | { |
Laura Abbott | 29defcc | 2014-08-01 16:13:40 -0700 | [diff] [blame] | 2089 | int ret_val = 0; |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 2090 | struct ion_heap *heap; |
| 2091 | struct ion_device *dev = client->dev; |
| 2092 | 	/*
| 2093 | 	 * Traverse the list of heaps available on this system and invoke
| 2094 | 	 * the callback on the heap matching the requested id and type.
| 2095 | 	 */
| 2096 | down_write(&dev->lock); |
| 2097 | plist_for_each_entry(heap, &dev->heaps, node) { |
Laura Abbott | 29defcc | 2014-08-01 16:13:40 -0700 | [diff] [blame] | 2098 | if (ION_HEAP(heap->id) != heap_id || |
| 2099 | type != heap->type) |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 2100 | continue; |
| 2101 | ret_val = f(heap, data); |
| 2102 | break; |
| 2103 | } |
| 2104 | up_write(&dev->lock); |
| 2105 | return ret_val; |
| 2106 | } |
| 2107 | EXPORT_SYMBOL(ion_walk_heaps); |
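/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * callback for ion_walk_heaps(). The walk invokes it once, on the heap
 * matching both the heap id and type, with dev->lock held for writing,
 * so the callback must not try to re-take that lock.
 */
static int __maybe_unused example_heap_cb(struct ion_heap *heap, void *data)
{
	size_t *total = data;

	*total += atomic_long_read(&heap->total_allocated);
	return 0;
}
/*
 * Usage: ion_walk_heaps(client, heap_id, ION_HEAP_TYPE_SYSTEM, &total,
 *			 example_heap_cb);
 */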
| 2108 | |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 2109 | struct ion_device *ion_device_create(long (*custom_ioctl) |
| 2110 | (struct ion_client *client, |
| 2111 | unsigned int cmd, |
| 2112 | unsigned long arg)) |
| 2113 | { |
| 2114 | struct ion_device *idev; |
| 2115 | int ret; |
| 2116 | |
Ben Marsh | 411059f | 2016-03-28 19:26:19 +0200 | [diff] [blame] | 2117 | idev = kzalloc(sizeof(*idev), GFP_KERNEL); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 2118 | if (!idev) |
| 2119 | return ERR_PTR(-ENOMEM); |
| 2120 | |
| 2121 | idev->dev.minor = MISC_DYNAMIC_MINOR; |
| 2122 | idev->dev.name = "ion"; |
| 2123 | idev->dev.fops = &ion_fops; |
| 2124 | idev->dev.parent = NULL; |
| 2125 | ret = misc_register(&idev->dev); |
| 2126 | if (ret) { |
| 2127 | pr_err("ion: failed to register misc device.\n"); |
Shailendra Verma | 283d930 | 2015-05-19 20:29:00 +0530 | [diff] [blame] | 2128 | kfree(idev); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 2129 | return ERR_PTR(ret); |
| 2130 | } |
| 2131 | |
| 2132 | idev->debug_root = debugfs_create_dir("ion", NULL); |
Mitchel Humpherys | b08585fb | 2014-02-17 13:58:34 -0800 | [diff] [blame] | 2133 | if (!idev->debug_root) { |
| 2134 | pr_err("ion: failed to create debugfs root directory.\n"); |
| 2135 | goto debugfs_done; |
| 2136 | } |
| 2137 | idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root); |
| 2138 | if (!idev->heaps_debug_root) { |
| 2139 | pr_err("ion: failed to create debugfs heaps directory.\n"); |
| 2140 | goto debugfs_done; |
| 2141 | } |
| 2142 | idev->clients_debug_root = debugfs_create_dir("clients", |
| 2143 | idev->debug_root); |
| 2144 | if (!idev->clients_debug_root) |
| 2145 | pr_err("ion: failed to create debugfs clients directory.\n"); |
| 2146 | |
| 2147 | debugfs_done: |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 2148 | |
| 2149 | idev->custom_ioctl = custom_ioctl; |
| 2150 | idev->buffers = RB_ROOT; |
Rebecca Schultz Zavin | 8d7ab9a | 2013-12-13 14:24:16 -0800 | [diff] [blame] | 2151 | mutex_init(&idev->buffer_lock); |
| 2152 | init_rwsem(&idev->lock); |
Rebecca Schultz Zavin | cd69488 | 2013-12-13 14:24:25 -0800 | [diff] [blame] | 2153 | plist_head_init(&idev->heaps); |
Rebecca Schultz Zavin | b892bf7 | 2013-12-13 14:23:40 -0800 | [diff] [blame] | 2154 | idev->clients = RB_ROOT; |
Neil Zhang | 948c4db | 2016-01-26 17:39:06 +0800 | [diff] [blame] | 2155 | ion_root_client = &idev->clients; |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 2156 | return idev; |
| 2157 | } |
Paul Gortmaker | 8c6c463 | 2015-10-13 16:46:53 -0400 | [diff] [blame] | 2158 | EXPORT_SYMBOL(ion_device_create); |
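/*
 * Illustrative sketch (hypothetical vendor code, not part of this file):
 * a custom ioctl handler passed to ion_device_create(). Returning -ENOTTY
 * for unrecognized commands is an assumption about the convention, not
 * mandated by this file.
 */
static long __maybe_unused example_custom_ioctl(struct ion_client *client,
						unsigned int cmd,
						unsigned long arg)
{
	switch (cmd) {
	/* vendor-specific commands would be decoded here */
	default:
		return -ENOTTY;
	}
}
/* Usage: idev = ion_device_create(example_custom_ioctl); */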
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 2159 | |
| 2160 | void ion_device_destroy(struct ion_device *dev) |
| 2161 | { |
| 2162 | misc_deregister(&dev->dev); |
Mitchel Humpherys | b08585fb | 2014-02-17 13:58:34 -0800 | [diff] [blame] | 2163 | debugfs_remove_recursive(dev->debug_root); |
Rebecca Schultz Zavin | c30707b | 2013-12-13 19:38:38 -0800 | [diff] [blame] | 2164 | /* XXX need to free the heaps and clients ? */ |
| 2165 | kfree(dev); |
| 2166 | } |
Paul Gortmaker | 8c6c463 | 2015-10-13 16:46:53 -0400 | [diff] [blame] | 2167 | EXPORT_SYMBOL(ion_device_destroy); |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 2168 | |
| 2169 | void __init ion_reserve(struct ion_platform_data *data) |
| 2170 | { |
| 2171 | int i; |
| 2172 | |
| 2173 | for (i = 0; i < data->nr; i++) { |
| 2174 | if (data->heaps[i].size == 0) |
| 2175 | continue; |
| 2176 | |
| 2177 | if (data->heaps[i].base == 0) { |
| 2178 | phys_addr_t paddr; |
| 2179 | |
| 2180 | paddr = memblock_alloc_base(data->heaps[i].size, |
| 2181 | data->heaps[i].align, |
| 2182 | MEMBLOCK_ALLOC_ANYWHERE); |
| 2183 | if (!paddr) { |
| 2184 | pr_err("%s: error allocating memblock for heap %d\n", |
| 2185 | __func__, i); |
| 2186 | continue; |
| 2187 | } |
| 2188 | data->heaps[i].base = paddr; |
| 2189 | } else { |
| 2190 | int ret = memblock_reserve(data->heaps[i].base, |
| 2191 | data->heaps[i].size); |
| 2192 | if (ret) |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 2193 | pr_err("memblock reserve of %zx@%pa failed\n", |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 2194 | data->heaps[i].size, |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 2195 | &data->heaps[i].base); |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 2196 | } |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 2197 | pr_info("%s: %s reserved base %pa size %zu\n", __func__, |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 2198 | data->heaps[i].name, |
Patrick Daly | eeeb940 | 2016-11-01 20:54:41 -0700 | [diff] [blame] | 2199 | &data->heaps[i].base, |
Patrick Daly | 7e8cbb4 | 2016-11-01 18:37:42 -0700 | [diff] [blame] | 2200 | data->heaps[i].size); |
| 2201 | } |
| 2202 | } |
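/*
 * Illustrative sketch (hypothetical board file, not part of this file):
 * platform data fed to ion_reserve() at early boot. All field values are
 * examples only; a zero .base asks ion_reserve() to allocate the region
 * from memblock, while a non-zero .base reserves that exact range.
 */
static struct ion_platform_heap example_heaps[] __initdata = {
	{
		.id    = ION_HEAP_TYPE_CARVEOUT,	/* real systems use their platform's id enum */
		.type  = ION_HEAP_TYPE_CARVEOUT,
		.name  = "example-carveout",
		.base  = 0,		/* let ion_reserve() pick the base */
		.size  = SZ_16M,
		.align = SZ_1M,
	},
};

static struct ion_platform_data example_ion_pdata __initdata = {
	.nr    = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,
};

/* Usage (from machine init): ion_reserve(&example_ion_pdata); */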
Minming Qi | 69376be | 2018-11-01 10:47:10 +0800 | [diff] [blame] | 2203 | |
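/*
 * Annotation: the small accessors below expose ion_client/ion_handle
 * internals to other ION source files (the exact consumers are outside
 * this file) so those files need not know the structure layouts.
 */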
| 2204 | void lock_client(struct ion_client *client) |
| 2205 | { |
| 2206 | mutex_lock(&client->lock); |
| 2207 | } |
| 2208 | |
| 2209 | void unlock_client(struct ion_client *client) |
| 2210 | { |
| 2211 | mutex_unlock(&client->lock); |
| 2212 | } |
| 2213 | |
| 2214 | struct ion_buffer *get_buffer(struct ion_handle *handle) |
| 2215 | { |
| 2216 | return handle->buffer; |
| 2217 | } |