blob: 245c9ae187a14351547ba21be4c5608b9ab80066 [file] [log] [blame]
Inki Dae1c248b72011-10-04 19:19:01 +09001/* exynos_drm_buf.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
Inki Daed81aecb2012-12-18 02:30:17 +09006 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
Inki Dae1c248b72011-10-04 19:19:01 +090010 */
11
David Howells760285e2012-10-02 18:01:07 +010012#include <drm/drmP.h>
13#include <drm/exynos_drm.h>
Inki Dae1c248b72011-10-04 19:19:01 +090014
15#include "exynos_drm_drv.h"
Inki Dae2c871122011-11-12 15:23:32 +090016#include "exynos_drm_gem.h"
Inki Dae1c248b72011-10-04 19:19:01 +090017#include "exynos_drm_buf.h"
Inki Dae694be452012-12-26 18:06:01 +090018#include "exynos_drm_iommu.h"
Inki Dae1c248b72011-10-04 19:19:01 +090019
Inki Dae1c248b72011-10-04 19:19:01 +090020static int lowlevel_buffer_allocate(struct drm_device *dev,
Inki Dae2b358922012-03-16 18:47:05 +090021 unsigned int flags, struct exynos_drm_gem_buf *buf)
Inki Dae1c248b72011-10-04 19:19:01 +090022{
Inki Dae0519f9a2012-10-20 07:53:42 -070023 int ret = 0;
Inki Dae1169af212012-12-14 14:34:31 +090024 enum dma_attr attr;
Inki Dae4744ad22012-12-07 17:51:27 +090025 unsigned int nr_pages;
Inki Dae2b358922012-03-16 18:47:05 +090026
Inki Dae2b358922012-03-16 18:47:05 +090027 if (buf->dma_addr) {
28 DRM_DEBUG_KMS("already allocated.\n");
29 return 0;
30 }
31
Inki Dae0519f9a2012-10-20 07:53:42 -070032 init_dma_attrs(&buf->dma_attrs);
33
Inki Dae1169af212012-12-14 14:34:31 +090034 /*
35 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
36 * region will be allocated else physically contiguous
37 * as possible.
38 */
Inki Dae1dcfe232012-12-27 19:54:23 +090039 if (!(flags & EXYNOS_BO_NONCONTIG))
Inki Dae1169af212012-12-14 14:34:31 +090040 dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
41
42 /*
43 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
44 * else cachable mapping.
45 */
46 if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
Inki Dae0519f9a2012-10-20 07:53:42 -070047 attr = DMA_ATTR_WRITE_COMBINE;
Inki Dae1169af212012-12-14 14:34:31 +090048 else
49 attr = DMA_ATTR_NON_CONSISTENT;
Inki Dae0519f9a2012-10-20 07:53:42 -070050
51 dma_set_attr(attr, &buf->dma_attrs);
Inki Dae4744ad22012-12-07 17:51:27 +090052 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
Inki Dae0519f9a2012-10-20 07:53:42 -070053
Inki Dae694be452012-12-26 18:06:01 +090054 nr_pages = buf->size >> PAGE_SHIFT;
55
56 if (!is_drm_iommu_supported(dev)) {
57 dma_addr_t start_addr;
58 unsigned int i = 0;
59
YoungJun Choaf51a5e2013-07-03 17:09:19 +090060 buf->pages = drm_calloc_large(nr_pages, sizeof(struct page));
Inki Dae694be452012-12-26 18:06:01 +090061 if (!buf->pages) {
62 DRM_ERROR("failed to allocate pages.\n");
63 return -ENOMEM;
64 }
65
66 buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
67 &buf->dma_addr, GFP_KERNEL,
68 &buf->dma_attrs);
69 if (!buf->kvaddr) {
70 DRM_ERROR("failed to allocate buffer.\n");
YoungJun Choaf51a5e2013-07-03 17:09:19 +090071 drm_free_large(buf->pages);
Inki Dae694be452012-12-26 18:06:01 +090072 return -ENOMEM;
73 }
74
75 start_addr = buf->dma_addr;
76 while (i < nr_pages) {
77 buf->pages[i] = phys_to_page(start_addr);
78 start_addr += PAGE_SIZE;
79 i++;
80 }
81 } else {
82
83 buf->pages = dma_alloc_attrs(dev->dev, buf->size,
84 &buf->dma_addr, GFP_KERNEL,
85 &buf->dma_attrs);
86 if (!buf->pages) {
87 DRM_ERROR("failed to allocate buffer.\n");
88 return -ENOMEM;
89 }
Inki Dae2b358922012-03-16 18:47:05 +090090 }
91
Inki Dae4744ad22012-12-07 17:51:27 +090092 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
Inki Dae2b358922012-03-16 18:47:05 +090093 if (!buf->sgt) {
Inki Dae4744ad22012-12-07 17:51:27 +090094 DRM_ERROR("failed to get sg table.\n");
Inki Dae61db75d2012-04-03 21:49:15 +090095 ret = -ENOMEM;
Inki Dae0519f9a2012-10-20 07:53:42 -070096 goto err_free_attrs;
Inki Dae61db75d2012-04-03 21:49:15 +090097 }
Inki Dae2b358922012-03-16 18:47:05 +090098
Inki Dae4744ad22012-12-07 17:51:27 +090099 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
Inki Dae2b358922012-03-16 18:47:05 +0900100 (unsigned long)buf->dma_addr,
101 buf->size);
102
103 return ret;
Inki Dae0519f9a2012-10-20 07:53:42 -0700104
Inki Dae0519f9a2012-10-20 07:53:42 -0700105err_free_attrs:
Inki Dae4744ad22012-12-07 17:51:27 +0900106 dma_free_attrs(dev->dev, buf->size, buf->pages,
Inki Dae0519f9a2012-10-20 07:53:42 -0700107 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
108 buf->dma_addr = (dma_addr_t)NULL;
Inki Dae2b358922012-03-16 18:47:05 +0900109
Inki Dae694be452012-12-26 18:06:01 +0900110 if (!is_drm_iommu_supported(dev))
YoungJun Choaf51a5e2013-07-03 17:09:19 +0900111 drm_free_large(buf->pages);
Inki Dae694be452012-12-26 18:06:01 +0900112
Inki Dae2b358922012-03-16 18:47:05 +0900113 return ret;
Inki Dae1c248b72011-10-04 19:19:01 +0900114}
115
116static void lowlevel_buffer_deallocate(struct drm_device *dev,
Inki Dae2b358922012-03-16 18:47:05 +0900117 unsigned int flags, struct exynos_drm_gem_buf *buf)
Inki Dae1c248b72011-10-04 19:19:01 +0900118{
Inki Dae2b358922012-03-16 18:47:05 +0900119 if (!buf->dma_addr) {
120 DRM_DEBUG_KMS("dma_addr is invalid.\n");
121 return;
122 }
123
Inki Dae4744ad22012-12-07 17:51:27 +0900124 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
Inki Dae2b358922012-03-16 18:47:05 +0900125 (unsigned long)buf->dma_addr,
126 buf->size);
127
128 sg_free_table(buf->sgt);
129
130 kfree(buf->sgt);
131 buf->sgt = NULL;
132
Inki Dae694be452012-12-26 18:06:01 +0900133 if (!is_drm_iommu_supported(dev)) {
134 dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
Inki Dae0519f9a2012-10-20 07:53:42 -0700135 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
YoungJun Choaf51a5e2013-07-03 17:09:19 +0900136 drm_free_large(buf->pages);
Inki Dae694be452012-12-26 18:06:01 +0900137 } else
138 dma_free_attrs(dev->dev, buf->size, buf->pages,
139 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
140
Inki Dae2b358922012-03-16 18:47:05 +0900141 buf->dma_addr = (dma_addr_t)NULL;
Inki Dae1c248b72011-10-04 19:19:01 +0900142}
143
Inki Dae2b358922012-03-16 18:47:05 +0900144struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
145 unsigned int size)
Inki Dae1c248b72011-10-04 19:19:01 +0900146{
Inki Dae2c871122011-11-12 15:23:32 +0900147 struct exynos_drm_gem_buf *buffer;
Inki Dae1c248b72011-10-04 19:19:01 +0900148
Inki Dae2c871122011-11-12 15:23:32 +0900149 DRM_DEBUG_KMS("desired size = 0x%x\n", size);
Inki Dae1c248b72011-10-04 19:19:01 +0900150
Inki Dae2c871122011-11-12 15:23:32 +0900151 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
152 if (!buffer) {
153 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
Joonyoung Shimee5e7702011-12-13 14:20:23 +0900154 return NULL;
Inki Dae1c248b72011-10-04 19:19:01 +0900155 }
156
Inki Dae2c871122011-11-12 15:23:32 +0900157 buffer->size = size;
Inki Dae2c871122011-11-12 15:23:32 +0900158 return buffer;
Inki Dae1c248b72011-10-04 19:19:01 +0900159}
160
void exynos_drm_fini_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buffer)
{
	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return;
	}

	/*
	 * The original also did "buffer = NULL;" after kfree(), but that
	 * only cleared the local parameter copy and had no effect on the
	 * caller -- dropped as a dead store.
	 */
	kfree(buffer);
}
172
int exynos_drm_alloc_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buf, unsigned int flags)
{
	/*
	 * allocate memory region and set the memory information
	 * to vaddr and dma_addr of a buffer object.
	 *
	 * Propagate lowlevel_buffer_allocate()'s error code directly
	 * instead of collapsing every failure to -ENOMEM; today the
	 * callee only returns 0 or -ENOMEM, so callers see no change,
	 * but future error codes will no longer be silently rewritten.
	 */
	return lowlevel_buffer_allocate(dev, flags, buf);
}
186
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{
	/* Thin wrapper: release the DMA memory backing @buffer. */
	lowlevel_buffer_deallocate(dev, flags, buffer);
}