/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in the creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;
/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @map_atomic: maps a page from the buffer into kernel address space;
 *		callers may not block between this call and the subsequent
 *		unmap call. This callback must not sleep.
 * @unmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
 *		  This callback must not sleep.
 * @map: maps a page from the buffer into kernel address space.
 * @unmap: [optional] unmaps a page from the buffer.
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer.
 */
struct dma_buf_ops {
	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &device can access the provided &dma_buf. Exporters which support
	 * buffer objects in special locations like VRAM or device-specific
	 * carveout areas should check whether the buffer could be moved to
	 * system memory (or directly accessed by the provided device), and
	 * otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the
	 * &dma_buf_attachment.priv pointer.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of the requesting device.
	 */
	int (*attach)(struct dma_buf *, struct device *,
		      struct dma_buf_attachment *);
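
	/*
	 * A minimal sketch of an @attach implementation for an exporter
	 * whose buffers always live in system memory; the my_buffer type
	 * and the MY_DMA_BITS constant are hypothetical exporter-side names:
	 *
	 *	static int my_attach(struct dma_buf *dmabuf, struct device *dev,
	 *			     struct dma_buf_attachment *attach)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		// Reject devices that cannot address the backing pages.
	 *		if (dma_get_mask(dev) < DMA_BIT_MASK(MY_DMA_BITS))
	 *			return -EINVAL;
	 *
	 *		attach->priv = buf;	// exporter-private housekeeping
	 *		return 0;
	 *	}
	 */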

	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for an
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully. This
	 * essentially pins the DMA buffer into place, and it cannot be moved
	 * anymore.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently attached
	 * devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * &device.dma_parms from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while blocked.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
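
	/*
	 * A minimal sketch of a @map_dma_buf implementation, assuming the
	 * exporter keeps a pre-built scatter list per buffer (my_buffer and
	 * its sgt member are hypothetical; a real exporter would typically
	 * build a separate sg_table per attachment):
	 *
	 *	static struct sg_table *
	 *	my_map_dma_buf(struct dma_buf_attachment *attach,
	 *		       enum dma_data_direction dir)
	 *	{
	 *		struct my_buffer *buf = attach->dmabuf->priv;
	 *
	 *		// Map the backing pages for DMA by the attached device.
	 *		if (!dma_map_sg(attach->dev, buf->sgt->sgl,
	 *				buf->sgt->nents, dir))
	 *			return ERR_PTR(-ENOMEM);
	 *
	 *		return buf->sgt;
	 *	}
	 */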
	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * It should also unpin the backing storage if this is the last mapping
	 * of the DMA buffer, if the exporter supports backing storage
	 * migration.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);

	/* TODO: Add try_map_dma_buf version, to return immediately with
	 * -EBUSY if the call would block.
	 */

	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put to release the &dma_buf. This
	 * callback is mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually available for cpu
	 * access - the exporter might need to allocate or swap-in and pin the
	 * backing storage. The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
	 * from userspace (where storage shouldn't be pinned to avoid handing
	 * de-facto mlock rights to userspace) and for the kernel-internal
	 * users of the various kmap interfaces, where the backing storage must
	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
	 * there are no in-kernel users of the kmap interfaces yet, this isn't
	 * a real problem.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done accessing the buffer from the CPU. The exporter can use this
	 * to flush caches and unpin any resources pinned in @begin_cpu_access.
	 * The result of any dma_buf kmap calls after end_cpu_access is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
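
	/*
	 * A minimal sketch of a @begin_cpu_access/@end_cpu_access pair for a
	 * non-coherent exporter that only needs cache maintenance (my_buffer,
	 * with its dev and sgt members, is a hypothetical exporter type):
	 *
	 *	static int my_begin_cpu_access(struct dma_buf *dmabuf,
	 *				       enum dma_data_direction dir)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		// Make device writes visible to the CPU.
	 *		dma_sync_sg_for_cpu(buf->dev, buf->sgt->sgl,
	 *				    buf->sgt->nents, dir);
	 *		return 0;
	 *	}
	 *
	 *	static int my_end_cpu_access(struct dma_buf *dmabuf,
	 *				     enum dma_data_direction dir)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		// Flush CPU writes back out for the device.
	 *		dma_sync_sg_for_device(buf->dev, buf->sgt->sgl,
	 *				       buf->sgt->nents, dir);
	 *		return 0;
	 *	}
	 */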
	void *(*map_atomic)(struct dma_buf *, unsigned long);
	void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
	void *(*map)(struct dma_buf *, unsigned long);
	void (*unmap)(struct dma_buf *, unsigned long, void *);

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be treated as incoherent; userspace
	 * is expected to bracket CPU access using the DMA_BUF_IOCTL_SYNC
	 * interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 *
	 * If an exporter needs to manually flush caches and hence needs to fake
	 * coherency for mmap support, it needs to be able to zap all the ptes
	 * pointing at the backing storage. Now the Linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct file,
	 * i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to set up their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with their
	 * own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
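
	/*
	 * A minimal sketch of an @mmap implementation that sets up the
	 * file/address_space association described above; buf->file is a
	 * hypothetical file owned by the exporter and my_driver_mmap() a
	 * hypothetical helper that installs the actual mapping:
	 *
	 *	static int my_mmap(struct dma_buf *dmabuf,
	 *			   struct vm_area_struct *vma)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		// Drop the anon file reference and point the vma at
	 *		// the exporter's own file, so unmap_mapping_range()
	 *		// can be used to zap the ptes later.
	 *		fput(vma->vm_file);
	 *		vma->vm_file = get_file(buf->file);
	 *		vma->vm_pgoff = 0;
	 *
	 *		return my_driver_mmap(buf, vma);
	 *	}
	 */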

	void *(*vmap)(struct dma_buf *);
	void (*vunmap)(struct dma_buf *, void *vaddr);
};

/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across processes, and for
 *        refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: used internally to serialize list manipulation, attach/detach and
 *        vmap/unmap
 * @vmapping_counter: used internally to refcnt the vmaps
 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
 * @exp_name: name of the exporter; useful for debugging.
 * @owner: pointer to exporter module; used for refcounting when exporter is a
 *         kernel module.
 * @list_node: node for dma_buf accounting and debugging.
 * @priv: exporter specific private data for this buffer object.
 * @resv: reservation object linked to this dma-buf
 * @poll: for userspace poll support
 * @cb_excl: for userspace poll support
 * @cb_shared: for userspace poll support
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created
 * by calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	struct mutex lock;
	unsigned vmapping_counter;
	void *vmap_ptr;
	const char *exp_name;
	struct module *owner;
	struct list_head list_node;
	void *priv;
	struct reservation_object *resv;

	/* poll support */
	wait_queue_head_t poll;

	struct dma_buf_poll_cb_t {
		struct dma_fence_cb cb;
		wait_queue_head_t *poll;

		__poll_t active;
	} cb_excl, cb_shared;
};

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment.
 * @priv: exporter specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping needed to initiate a transfer is
 * created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment(), as sketched below.
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	void *priv;
};
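
/*
 * A typical importer-side sequence, as a minimal sketch (my_dev is a
 * placeholder for the importing &device; error handling abbreviated):
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, my_dev);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt)) {
 *		dma_buf_detach(dmabuf, attach);
 *		return PTR_ERR(sgt);
 *	}
 *
 *	// ... program the device using the addresses in sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 */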

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name: name of the exporter - useful for debugging.
 * @owner: pointer to exporter module - used for refcounting the kernel module
 * @ops: Attach allocator-defined dma buf ops to the new buffer
 * @size: Size of the buffer
 * @flags: mode flags for the file
 * @resv: reservation-object, NULL to allocate default one
 * @priv: Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct reservation_object *resv;
	void *priv;
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * The DEFINE_DMA_BUF_EXPORT_INFO macro defines a &struct dma_buf_export_info,
 * zeroes it out and pre-populates its exp_name and owner fields.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					 .owner = THIS_MODULE }

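/*
 * Typical exporter usage, as a minimal sketch (my_buf_ops and my_buf are
 * hypothetical exporter-side names):
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_buf_ops;
 *	exp_info.size = my_buf->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = my_buf;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 */
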
/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf: [in] pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, which is needed by drivers
 * that create additional references to the dmabuf on the kernel side. For
 * example, an exporter may keep a dmabuf pointer so that subsequent exports
 * don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}
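
/*
 * For instance, an exporter caching the dma_buf it handed out might do the
 * following (my_obj->dmabuf is a hypothetical cached pointer):
 *
 *	if (my_obj->dmabuf) {
 *		get_dma_buf(my_obj->dmabuf);	// reuse the existing buffer
 *		return my_obj->dmabuf;
 *	}
 */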

struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *dmabuf_attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
#endif /* __DMA_BUF_H__ */