Rob Clark | 6ad11bc | 2012-04-10 13:19:55 -0500 | [diff] [blame] | 1 | /* |
Rob Clark | 8bb0daf | 2013-02-11 12:43:09 -0500 | [diff] [blame] | 2 | * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c |
Rob Clark | 6ad11bc | 2012-04-10 13:19:55 -0500 | [diff] [blame] | 3 | * |
| 4 | * Copyright (C) 2011 Texas Instruments |
| 5 | * Author: Rob Clark <rob.clark@linaro.org> |
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify it |
| 8 | * under the terms of the GNU General Public License version 2 as published by |
| 9 | * the Free Software Foundation. |
| 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, but WITHOUT |
| 12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 14 | * more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU General Public License along with |
| 17 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | */ |
| 19 | |
| 20 | #include "omap_drv.h" |
| 21 | |
| 22 | #include <linux/dma-buf.h> |
| 23 | |
| 24 | static struct sg_table *omap_gem_map_dma_buf( |
| 25 | struct dma_buf_attachment *attachment, |
| 26 | enum dma_data_direction dir) |
| 27 | { |
| 28 | struct drm_gem_object *obj = attachment->dmabuf->priv; |
| 29 | struct sg_table *sg; |
| 30 | dma_addr_t paddr; |
| 31 | int ret; |
| 32 | |
| 33 | sg = kzalloc(sizeof(*sg), GFP_KERNEL); |
| 34 | if (!sg) |
| 35 | return ERR_PTR(-ENOMEM); |
| 36 | |
| 37 | /* camera, etc, need physically contiguous.. but we need a |
| 38 | * better way to know this.. |
| 39 | */ |
| 40 | ret = omap_gem_get_paddr(obj, &paddr, true); |
| 41 | if (ret) |
| 42 | goto out; |
| 43 | |
| 44 | ret = sg_alloc_table(sg, 1, GFP_KERNEL); |
| 45 | if (ret) |
| 46 | goto out; |
| 47 | |
| 48 | sg_init_table(sg->sgl, 1); |
| 49 | sg_dma_len(sg->sgl) = obj->size; |
| 50 | sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0); |
| 51 | sg_dma_address(sg->sgl) = paddr; |
| 52 | |
Rob Clark | 8b6b569 | 2012-05-17 02:37:25 -0600 | [diff] [blame] | 53 | /* this should be after _get_paddr() to ensure we have pages attached */ |
| 54 | omap_gem_dma_sync(obj, dir); |
| 55 | |
Rob Clark | 6ad11bc | 2012-04-10 13:19:55 -0500 | [diff] [blame] | 56 | return sg; |
Cong Ding | 32ac1a5 | 2013-01-15 20:46:50 +0100 | [diff] [blame] | 57 | out: |
| 58 | kfree(sg); |
| 59 | return ERR_PTR(ret); |
Rob Clark | 6ad11bc | 2012-04-10 13:19:55 -0500 | [diff] [blame] | 60 | } |
| 61 | |
| 62 | static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, |
| 63 | struct sg_table *sg, enum dma_data_direction dir) |
| 64 | { |
| 65 | struct drm_gem_object *obj = attachment->dmabuf->priv; |
| 66 | omap_gem_put_paddr(obj); |
| 67 | sg_free_table(sg); |
| 68 | kfree(sg); |
| 69 | } |
| 70 | |
| 71 | static void omap_gem_dmabuf_release(struct dma_buf *buffer) |
| 72 | { |
| 73 | struct drm_gem_object *obj = buffer->priv; |
| 74 | /* release reference that was taken when dmabuf was exported |
| 75 | * in omap_gem_prime_set().. |
| 76 | */ |
| 77 | drm_gem_object_unreference_unlocked(obj); |
| 78 | } |
| 79 | |
| 80 | |
| 81 | static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, |
| 82 | size_t start, size_t len, enum dma_data_direction dir) |
| 83 | { |
| 84 | struct drm_gem_object *obj = buffer->priv; |
| 85 | struct page **pages; |
| 86 | if (omap_gem_flags(obj) & OMAP_BO_TILED) { |
| 87 | /* TODO we would need to pin at least part of the buffer to |
| 88 | * get de-tiled view. For now just reject it. |
| 89 | */ |
| 90 | return -ENOMEM; |
| 91 | } |
| 92 | /* make sure we have the pages: */ |
| 93 | return omap_gem_get_pages(obj, &pages, true); |
| 94 | } |
| 95 | |
| 96 | static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer, |
| 97 | size_t start, size_t len, enum dma_data_direction dir) |
| 98 | { |
| 99 | struct drm_gem_object *obj = buffer->priv; |
| 100 | omap_gem_put_pages(obj); |
| 101 | } |
| 102 | |
| 103 | |
| 104 | static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer, |
| 105 | unsigned long page_num) |
| 106 | { |
| 107 | struct drm_gem_object *obj = buffer->priv; |
| 108 | struct page **pages; |
| 109 | omap_gem_get_pages(obj, &pages, false); |
Rob Clark | 8b6b569 | 2012-05-17 02:37:25 -0600 | [diff] [blame] | 110 | omap_gem_cpu_sync(obj, page_num); |
Rob Clark | 6ad11bc | 2012-04-10 13:19:55 -0500 | [diff] [blame] | 111 | return kmap_atomic(pages[page_num]); |
| 112 | } |
| 113 | |
/* Tear down an atomic per-page mapping created by
 * omap_gem_dmabuf_kmap_atomic(); addr is the pointer it returned.
 */
static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	kunmap_atomic(addr);
}
| 119 | |
| 120 | static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer, |
| 121 | unsigned long page_num) |
| 122 | { |
| 123 | struct drm_gem_object *obj = buffer->priv; |
| 124 | struct page **pages; |
| 125 | omap_gem_get_pages(obj, &pages, false); |
Rob Clark | 8b6b569 | 2012-05-17 02:37:25 -0600 | [diff] [blame] | 126 | omap_gem_cpu_sync(obj, page_num); |
Rob Clark | 6ad11bc | 2012-04-10 13:19:55 -0500 | [diff] [blame] | 127 | return kmap(pages[page_num]); |
| 128 | } |
| 129 | |
| 130 | static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer, |
| 131 | unsigned long page_num, void *addr) |
| 132 | { |
| 133 | struct drm_gem_object *obj = buffer->priv; |
| 134 | struct page **pages; |
| 135 | omap_gem_get_pages(obj, &pages, false); |
| 136 | kunmap(pages[page_num]); |
| 137 | } |
| 138 | |
/* mmap the dma-buf into an importer's address space.
 *
 * NOTE(review): two different "mmap_obj" helpers run in sequence here —
 * drm_gem_mmap_obj() (DRM core, sets up vma flags/ops; called under
 * dev->struct_mutex) followed by omap_gem_mmap_obj() (driver-specific
 * setup).  The split and the locking scope look deliberate; presumably
 * omap_gem_mmap_obj() does not need struct_mutex — confirm against
 * omap_gem.c before restructuring.
 */
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	struct drm_device *dev = obj->dev;
	int ret = 0;

	/* only buffers backed by shmem (obj->filp) can be mmap'd */
	if (WARN_ON(!obj->filp))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	mutex_unlock(&dev->struct_mutex);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}
| 157 | |
/* dma-buf operations used for buffers exported by omapdrm;
 * also used by omap_gem_prime_import() to recognize our own exports.
 */
static struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = omap_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.kmap_atomic = omap_gem_dmabuf_kmap_atomic,
	.kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
	.kmap = omap_gem_dmabuf_kmap,
	.kunmap = omap_gem_dmabuf_kunmap,
	.mmap = omap_gem_dmabuf_mmap,
};
| 170 | |
YAMANE Toshiaki | 11d3d27 | 2012-11-14 19:40:14 +0900 | [diff] [blame] | 171 | struct dma_buf *omap_gem_prime_export(struct drm_device *dev, |
Rob Clark | 6ad11bc | 2012-04-10 13:19:55 -0500 | [diff] [blame] | 172 | struct drm_gem_object *obj, int flags) |
| 173 | { |
Sumit Semwal | d8fbe34 | 2015-01-23 12:53:43 +0530 | [diff] [blame] | 174 | DEFINE_DMA_BUF_EXPORT_INFO(exp_info); |
| 175 | |
| 176 | exp_info.ops = &omap_dmabuf_ops; |
| 177 | exp_info.size = obj->size; |
| 178 | exp_info.flags = flags; |
| 179 | exp_info.priv = obj; |
| 180 | |
| 181 | return dma_buf_export(&exp_info); |
Rob Clark | 6ad11bc | 2012-04-10 13:19:55 -0500 | [diff] [blame] | 182 | } |
Rob Clark | 3080b83 | 2012-05-17 02:37:26 -0600 | [diff] [blame] | 183 | |
YAMANE Toshiaki | 11d3d27 | 2012-11-14 19:40:14 +0900 | [diff] [blame] | 184 | struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, |
Rob Clark | 3080b83 | 2012-05-17 02:37:26 -0600 | [diff] [blame] | 185 | struct dma_buf *buffer) |
| 186 | { |
| 187 | struct drm_gem_object *obj; |
| 188 | |
| 189 | /* is this one of own objects? */ |
| 190 | if (buffer->ops == &omap_dmabuf_ops) { |
| 191 | obj = buffer->priv; |
| 192 | /* is it from our device? */ |
| 193 | if (obj->dev == dev) { |
Seung-Woo Kim | be8a42a | 2012-09-27 15:30:06 +0900 | [diff] [blame] | 194 | /* |
| 195 | * Importing dmabuf exported from out own gem increases |
| 196 | * refcount on gem itself instead of f_count of dmabuf. |
| 197 | */ |
Rob Clark | 3080b83 | 2012-05-17 02:37:26 -0600 | [diff] [blame] | 198 | drm_gem_object_reference(obj); |
| 199 | return obj; |
| 200 | } |
| 201 | } |
| 202 | |
| 203 | /* |
| 204 | * TODO add support for importing buffers from other devices.. |
| 205 | * for now we don't need this but would be nice to add eventually |
| 206 | */ |
| 207 | return ERR_PTR(-EINVAL); |
| 208 | } |