/* exynos_drm_buf.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "exynos_drm.h"

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

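/*
 * Allocate a physically contiguous, write-combined buffer for @buf and
 * build an sg_table describing it in page_size-sized chunks. Only
 * contiguous requests are supported here; EXYNOS_BO_NONCONTIG requests
 * are rejected with -EINVAL.
 */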
static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
	dma_addr_t start_addr, end_addr;
	unsigned int npages, page_size, i = 0;
	struct scatterlist *sgl;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("unsupported allocation type.\n");
		return -EINVAL;
	}

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

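	/*
	 * Pick the largest chunk size the buffer can use so the sg table
	 * stays short: SECTION_SIZE chunks for buffers of at least 1MiB,
	 * 64KiB chunks for buffers of at least 64KiB, single pages
	 * otherwise. The "+ 1" leaves room for a final partial chunk.
	 */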
	if (buf->size >= SZ_1M) {
		npages = (buf->size >> SECTION_SHIFT) + 1;
		page_size = SECTION_SIZE;
	} else if (buf->size >= SZ_64K) {
		npages = (buf->size >> 16) + 1;
		page_size = SZ_64K;
	} else {
		npages = (buf->size >> PAGE_SHIFT) + 1;
		page_size = PAGE_SIZE;
	}

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		return -ENOMEM;
	}

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		kfree(buf->sgt);
		buf->sgt = NULL;
		return -ENOMEM;
	}

	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
			&buf->dma_addr, GFP_KERNEL);
	if (!buf->kvaddr) {
		DRM_ERROR("failed to allocate buffer.\n");
		ret = -ENOMEM;
		goto err1;
	}

	start_addr = buf->dma_addr;
	end_addr = buf->dma_addr + buf->size;

	buf->pages = kzalloc(sizeof(struct page *) * npages, GFP_KERNEL);
	if (!buf->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err2;
	}

	sgl = buf->sgt->sgl;

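	/*
	 * Walk the contiguous region and add one sg entry per page_size
	 * chunk; any remainder smaller than page_size is added as a
	 * final, shorter entry after the loop.
	 */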
	while (i < npages) {
		buf->pages[i] = phys_to_page(start_addr);
		sg_set_page(sgl, buf->pages[i], page_size, 0);
		sg_dma_address(sgl) = start_addr;
		start_addr += page_size;
		if (end_addr - start_addr < page_size)
			break;
		sgl = sg_next(sgl);
		i++;
	}

	buf->pages[i + 1] = phys_to_page(start_addr);

	sgl = sg_next(sgl);
	sg_set_page(sgl, buf->pages[i + 1], end_addr - start_addr, 0);
	sg_dma_address(sgl) = start_addr;

	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->kvaddr,
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;
err2:
	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
			(dma_addr_t)buf->dma_addr);
	buf->dma_addr = (dma_addr_t)NULL;
err1:
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	return ret;
}

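/*
 * Free the contiguous memory, the page array and the sg_table that
 * lowlevel_buffer_allocate() set up for @buf.
 */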
static void lowlevel_buffer_deallocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);

	/*
	 * Release only physically contiguous memory here; non-contiguous
	 * memory is released by the exynos gem framework.
	 */
	if (flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("unsupported allocation type.\n");
		return;
	}

	if (!buf->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->kvaddr,
			(unsigned long)buf->dma_addr,
			buf->size);

	sg_free_table(buf->sgt);

	kfree(buf->sgt);
	buf->sgt = NULL;

	kfree(buf->pages);
	buf->pages = NULL;

	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
			(dma_addr_t)buf->dma_addr);
	buf->dma_addr = (dma_addr_t)NULL;
}

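/*
 * Allocate and initialize a buffer descriptor of the requested size.
 * The backing memory itself is allocated later by exynos_drm_alloc_buf().
 */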
struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
		unsigned int size)
{
	struct exynos_drm_gem_buf *buffer;

	DRM_DEBUG_KMS("%s.\n", __FILE__);
	DRM_DEBUG_KMS("desired size = 0x%x\n", size);

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		return NULL;
	}

	buffer->size = size;
	return buffer;
}

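/*
 * Free the buffer descriptor itself; this does not release any backing
 * memory (see exynos_drm_free_buf()).
 */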
void exynos_drm_fini_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buffer)
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);

	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return;
	}

	kfree(buffer);
	buffer = NULL;
}

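/*
 * Allocate the backing memory for @buf according to @flags and fill in
 * its kernel virtual address, DMA address and sg_table.
 */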
int exynos_drm_alloc_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buf, unsigned int flags)
{
	/*
	 * Allocate the memory region and store its kernel virtual address
	 * and DMA address in the buffer object.
	 */
	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
		return -ENOMEM;

	return 0;
}

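/*
 * Release the backing memory of @buffer that was allocated by
 * exynos_drm_alloc_buf().
 */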
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{
	lowlevel_buffer_deallocate(dev, flags, buffer);
}