/*
 * drivers/gpu/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/ion.h>
#include <linux/iommu.h>

struct ion_mapping;

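/*
 * a kref-counted scatterlist describing a buffer's dma mapping
 */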
struct ion_dma_mapping {
	struct kref ref;
	struct scatterlist *sglist;
};

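/*
 * a kref-counted kernel virtual address for a mapped buffer
 */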
struct ion_kernel_mapping {
	struct kref ref;
	void *vaddr;
};

/**
 * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
 * @iova_addr: iommu virtual address
 * @node: rb node to exist in the buffer's tree of iommu mappings
 * @domain_info: contains the partition number and domain number
 *		domain_info[1] = domain number
 *		domain_info[0] = partition number
 * @buffer: the ion buffer this mapping belongs to
 * @ref: for reference counting this mapping
 * @mapped_size: size of the iova space mapped
 *		(may not be the same as the buffer size)
 *
 * Represents a mapping of one ion buffer to a particular iommu domain
 * and address range. There may exist other mappings of this buffer in
 * different domains or address ranges. All mappings will have the same
 * cacheability and security.
 */
struct ion_iommu_map {
	unsigned long iova_addr;
	struct rb_node node;
	union {
		int domain_info[2];
		uint64_t key;
	};
	struct ion_buffer *buffer;
	struct kref ref;
	int mapped_size;
};
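
/*
 * Because domain_info and key occupy the same storage, a (partition,
 * domain) pair can be compared as a single 64-bit value when searching a
 * buffer's iommu_maps tree. A minimal sketch (illustrative only; the
 * helper name is hypothetical):
 *
 *	static uint64_t make_iommu_map_key(int domain_num, int partition_num)
 *	{
 *		struct ion_iommu_map m;
 *
 *		m.domain_info[1] = domain_num;
 *		m.domain_info[0] = partition_num;
 *		return m.key;
 *	}
 */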
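/* returns the buffer that an ion_handle refers to */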
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref: reference count
 * @node: node in the ion_device buffers tree
 * @dev: back pointer to the ion_device
 * @heap: back pointer to the heap the buffer came from
 * @flags: buffer specific flags
 * @size: size of the buffer
 * @priv_virt: private data to the buffer representable as
 *		a void *
 * @priv_phys: private data to the buffer representable as
 *		an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock: protects the buffer's cnt fields
 * @kmap_cnt: number of times the buffer is mapped to the kernel
 * @vaddr: the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt: number of times the buffer is mapped for dma
 * @sglist: the scatterlist for the buffer if dmap_cnt is not zero
 * @umap_cnt: number of times the buffer is mapped to userspace
 * @iommu_map_cnt: number of times the buffer is mapped to an iommu
 * @iommu_maps: rb tree of the buffer's iommu mappings
 * @marked: debug state used when scanning for leaked buffers
 */
struct ion_buffer {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	int dmap_cnt;
	struct scatterlist *sglist;
	int umap_cnt;
	unsigned int iommu_map_cnt;
	struct rb_root iommu_maps;
	int marked;
};
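
/*
 * The counters above are protected by @lock so that only the first map
 * call creates a mapping and only the last unmap tears it down. A
 * plausible sketch of the kernel-mapping path (illustrative, not the
 * actual core code):
 *
 *	mutex_lock(&buffer->lock);
 *	if (!buffer->kmap_cnt)
 *		buffer->vaddr = buffer->heap->ops->map_kernel(buffer->heap,
 *							      buffer, flags);
 *	buffer->kmap_cnt++;
 *	vaddr = buffer->vaddr;
 *	mutex_unlock(&buffer->lock);
 */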

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate: allocate memory
 * @free: free memory
 * @phys: get physical address of a buffer (only defined on
 *	physically contiguous heaps)
 * @map_dma: map the memory for dma to a scatterlist
 * @unmap_dma: unmap the memory for dma
 * @map_kernel: map memory to the kernel
 * @unmap_kernel: unmap memory from the kernel
 * @map_user: map memory to userspace
 * @unmap_user: unmap memory from userspace
 * @cache_op: perform a cache maintenance operation (e.g. clean or
 *	invalidate) on a range of the buffer
 * @map_iommu: map the buffer into an iommu domain at a given partition
 * @unmap_iommu: unmap the buffer from the iommu
 * @print_debug: print heap specific debug information to a seq_file
 * @secure_heap: secure the heap's memory, on heaps that support protection
 * @unsecure_heap: undo the protection set up by @secure_heap
 */
struct ion_heap_ops {
	int (*allocate) (struct ion_heap *heap,
			 struct ion_buffer *buffer, unsigned long len,
			 unsigned long align, unsigned long flags);
	void (*free) (struct ion_buffer *buffer);
	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
		     ion_phys_addr_t *addr, size_t *len);
	struct scatterlist *(*map_dma) (struct ion_heap *heap,
					struct ion_buffer *buffer);
	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer,
			      unsigned long flags);
	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
			 struct vm_area_struct *vma, unsigned long flags);
	void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
	int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset,
			unsigned int length, unsigned int cmd);
	int (*map_iommu)(struct ion_buffer *buffer,
			 struct ion_iommu_map *map_data,
			 unsigned int domain_num,
			 unsigned int partition_num,
			 unsigned long align,
			 unsigned long iova_length,
			 unsigned long flags);
	void (*unmap_iommu)(struct ion_iommu_map *data);
	int (*print_debug)(struct ion_heap *heap, struct seq_file *s);
	int (*secure_heap)(struct ion_heap *heap);
	int (*unsecure_heap)(struct ion_heap *heap);
};
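
/*
 * A heap typically provides at least allocate, free, map_dma and
 * unmap_dma; ops a heap cannot support may be left NULL. A sketch of a
 * hypothetical heap's ops (the my_heap_* functions are illustrative):
 *
 *	static struct ion_heap_ops my_heap_ops = {
 *		.allocate = my_heap_allocate,
 *		.free = my_heap_free,
 *		.map_dma = my_heap_map_dma,
 *		.unmap_dma = my_heap_unmap_dma,
 *		.map_kernel = my_heap_map_kernel,
 *		.unmap_kernel = my_heap_unmap_kernel,
 *	};
 */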

/**
 * struct ion_heap - represents a heap in the system
 * @node: rb node to put the heap on the device's tree of heaps
 * @dev: back pointer to the ion_device
 * @type: type of heap
 * @ops: ops struct as above
 * @id: id of heap, also indicates priority of this heap when
 *	allocating. These are specified by platform data and
 *	MUST be unique
 * @name: used for debugging
 *
 * Represents a pool of memory from which buffers can be made. In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct rb_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	int id;
	const char *name;
};

#define iommu_map_domain(__m) ((__m)->domain_info[1])
#define iommu_map_partition(__m) ((__m)->domain_info[0])

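/*
 * e.g., given a struct ion_iommu_map *data:
 *
 *	int domain_num = iommu_map_domain(data);
 *	int partition_num = iommu_map_partition(data);
 *
 * The macros expand to lvalues, so they can also be used to fill in a
 * new mapping's domain_info.
 */
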
/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl: arch specific ioctl function if applicable
 *
 * returns a valid device or an ERR_PTR value on failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg));
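
/*
 * A typical init path (sketch; assumes no arch specific ioctl is needed,
 * so NULL is passed for @custom_ioctl):
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 */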

/**
 * ion_device_destroy - frees a device and its resources
 * @dev: the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev: the device
 * @heap: the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);

/**
 * functions for creating and destroying the built-in ion heaps.
 * architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
void ion_iommu_heap_destroy(struct ion_heap *);

struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *);
void ion_cp_heap_destroy(struct ion_heap *);

struct ion_heap *ion_reusable_heap_create(struct ion_platform_heap *);
void ion_reusable_heap_destroy(struct ion_heap *);
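
/*
 * Typical use from board/platform code (sketch; heap_data comes from the
 * platform's struct ion_platform_data). ion_heap_create() dispatches on
 * heap_data->type to one of the constructors above:
 *
 *	struct ion_heap *heap = ion_heap_create(heap_data);
 *
 *	if (IS_ERR_OR_NULL(heap))
 *		return PTR_ERR(heap);
 *	ion_device_add_heap(idev, heap);
 */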

/**
 * kernel api to allocate/free from carveout -- used when carveout is
 * used to back an architecture specific custom heap
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
				      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size);

struct ion_heap *msm_get_contiguous_heap(void);

/**
 * The carveout/cp heaps return physical addresses; since 0 may be a valid
 * physical address, these values are used to indicate allocation failure.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL -1
#define ION_CP_ALLOCATE_FAIL -1
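
/*
 * Because failure is signalled with an address value rather than an
 * ERR_PTR, callers compare against the *_ALLOCATE_FAIL constants, e.g.:
 *
 *	ion_phys_addr_t paddr = ion_carveout_allocate(heap, size, align);
 *
 *	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
 *		return -ENOMEM;
 */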

/**
 * The reserved heap returns physical addresses; since 0 may be a valid
 * physical address, this value is used to indicate allocation failure.
 */
#define ION_RESERVED_ALLOCATE_FAIL -1

/**
 * ion_map_fmem_buffer - map fmem allocated memory into the kernel
 * @buffer: buffer to map
 * @phys_base: physical base of the heap
 * @virt_base: virtual base of the heap
 * @flags: flags for the heap
 *
 * Map fmem allocated memory into the kernel address space. This
 * is designed to be used by other heaps that need fmem behavior.
 * The virtual range must be pre-allocated.
 */
void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
			  void *virt_base, unsigned long flags);
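
/*
 * Sketch of how a heap built on fmem might implement map_kernel with this
 * helper (illustrative only; the reserved_vrange field is hypothetical):
 *
 *	void *vaddr = ion_map_fmem_buffer(buffer, heap_phys_base,
 *					  heap->reserved_vrange, flags);
 *
 * where the virtual range passed as virt_base was set aside in advance,
 * as required above.
 */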

#endif /* _ION_PRIV_H */