/* exynos_drm_buf.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"

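/*
 * Back a GEM buffer with DMA memory.
 *
 * DMA attributes are chosen from the EXYNOS_BO_* flags. Without an
 * IOMMU the allocation is physically contiguous and a page array is
 * built by hand from the start address; with an IOMMU the pages come
 * straight from dma_alloc_attrs(). Either way the pages end up in a
 * scatter/gather table on buf->sgt.
 */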
static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
	int ret = 0;
	enum dma_attr attr;
	unsigned int nr_pages;

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&buf->dma_attrs);

	/*
	 * If EXYNOS_BO_CONTIG, allocate a fully physically contiguous
	 * memory region; otherwise allocate memory that is as
	 * physically contiguous as possible.
	 */
	if (!(flags & EXYNOS_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
	 * mapping; otherwise use a cachable mapping.
	 */
	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &buf->dma_attrs);
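	/*
	 * With DMA_ATTR_NO_KERNEL_MAPPING the allocation is not given a
	 * usable kernel virtual mapping; dma_alloc_attrs() may return an
	 * opaque cookie rather than a dereferenceable address.
	 */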
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

	nr_pages = buf->size >> PAGE_SHIFT;

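	/*
	 * Without IOMMU support the device needs physically contiguous
	 * memory, so the page array tracking the allocation is built
	 * manually below. With an IOMMU, dma_alloc_attrs() hands back
	 * the page array itself.
	 */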
	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		buf->pages = kzalloc(sizeof(struct page *) * nr_pages,
					GFP_KERNEL);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->kvaddr) {
			DRM_ERROR("failed to allocate buffer.\n");
			kfree(buf->pages);
			return -ENOMEM;
		}

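		/*
		 * The region is physically contiguous, so each page can
		 * be derived from the start address in turn.
		 */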
		start_addr = buf->dma_addr;
		while (i < nr_pages) {
			buf->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {
		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
	if (!buf->sgt) {
		DRM_ERROR("failed to get sg table.\n");
		ret = -ENOMEM;
		goto err_free_attrs;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;

err_free_attrs:
	if (!is_drm_iommu_supported(dev)) {
		/* the non-IOMMU path allocated through buf->kvaddr */
		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
		kfree(buf->pages);
	} else
		dma_free_attrs(dev->dev, buf->size, buf->pages,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
	buf->dma_addr = (dma_addr_t)NULL;

	return ret;
}

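/*
 * Release the DMA memory behind a GEM buffer: tear down the
 * scatter/gather table, then free the allocation through the path
 * matching how it was made (contiguous or IOMMU-mapped).
 */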
static void lowlevel_buffer_deallocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
	if (!buf->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	sg_free_table(buf->sgt);

	kfree(buf->sgt);
	buf->sgt = NULL;

	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
		kfree(buf->pages);
	} else
		dma_free_attrs(dev->dev, buf->size, buf->pages,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);

	buf->dma_addr = (dma_addr_t)NULL;
}

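/*
 * Allocate and initialize only the buffer bookkeeping structure;
 * the backing memory is allocated later by exynos_drm_alloc_buf().
 */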
struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
		unsigned int size)
{
	struct exynos_drm_gem_buf *buffer;

	DRM_DEBUG_KMS("desired size = 0x%x\n", size);

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		return NULL;
	}

	buffer->size = size;

	return buffer;
}

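/*
 * Free the buffer bookkeeping structure. The backing memory is
 * expected to have been released already via exynos_drm_free_buf().
 */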
void exynos_drm_fini_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buffer)
{
	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return;
	}

	kfree(buffer);
}

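/*
 * Allocate the backing memory for a buffer previously set up with
 * exynos_drm_init_buf(), honoring the EXYNOS_BO_* flags.
 */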
int exynos_drm_alloc_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buf, unsigned int flags)
{
	/*
	 * allocate a memory region and store the memory information
	 * in the kvaddr and dma_addr members of the buffer object.
	 */
	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
		return -ENOMEM;

	return 0;
}

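/*
 * Counterpart to exynos_drm_alloc_buf(): release the backing memory
 * of a buffer.
 */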
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{
	lowlevel_buffer_deallocate(dev, flags, buffer);
}