drm/exynos: update gem and buffer framework.

with this patch, we can allocate either physically contiguous or non-contiguous
memory, and a scatterlist is created for IOMMU support so that the allocated
memory region can later be mapped into an IOMMU page table through that scatterlist.
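
For illustration only (not part of this patch), a user-space caller could
request a non-contiguous buffer roughly as follows, assuming libdrm and the
exynos UAPI header; the helper name is made up:

    #include <errno.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <drm/exynos_drm.h>

    /* hypothetical helper: create a physically non-contiguous GEM buffer. */
    static int exynos_bo_create_noncontig(int fd, unsigned long size,
                                          unsigned int *handle)
    {
            struct drm_exynos_gem_create req;

            memset(&req, 0, sizeof(req));
            req.size = size;
            req.flags = EXYNOS_BO_NONCONTIG;    /* pages backed by shmem */

            if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) < 0)
                    return -errno;

            *handle = req.handle;
            return 0;
    }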

Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 3cf785c..4a3a5f7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -25,45 +25,161 @@
 
 #include "drmP.h"
 #include "drm.h"
+#include "exynos_drm.h"
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"
 
 static int lowlevel_buffer_allocate(struct drm_device *dev,
-		struct exynos_drm_gem_buf *buffer)
+		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
+	dma_addr_t start_addr, end_addr;
+	unsigned int npages, page_size, i = 0;
+	struct scatterlist *sgl;
+	int ret = 0;
+
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
-			&buffer->dma_addr, GFP_KERNEL);
-	if (!buffer->kvaddr) {
-		DRM_ERROR("failed to allocate buffer.\n");
+	if (flags & EXYNOS_BO_NONCONTIG) {
+		DRM_DEBUG_KMS("unsupported allocation type.\n");
+		return -EINVAL;
+	}
+
+	if (buf->dma_addr) {
+		DRM_DEBUG_KMS("already allocated.\n");
+		return 0;
+	}
+
+	if (buf->size >= SZ_1M) {
+		npages = (buf->size >> SECTION_SHIFT) + 1;
+		page_size = SECTION_SIZE;
+	} else if (buf->size >= SZ_64K) {
+		npages = (buf->size >> 16) + 1;
+		page_size = SZ_64K;
+	} else {
+		npages = (buf->size >> PAGE_SHIFT) + 1;
+		page_size = PAGE_SIZE;
+	}
+
+	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!buf->sgt) {
+		DRM_ERROR("failed to allocate sg table.\n");
 		return -ENOMEM;
 	}
 
-	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)buffer->kvaddr,
-			(unsigned long)buffer->dma_addr,
-			buffer->size);
+	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
+	if (ret < 0) {
+		DRM_ERROR("failed to initialize sg table.\n");
+		kfree(buf->sgt);
+		buf->sgt = NULL;
+		return -ENOMEM;
+	}
 
-	return 0;
+	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
+			&buf->dma_addr, GFP_KERNEL);
+	if (!buf->kvaddr) {
+		DRM_ERROR("failed to allocate buffer.\n");
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	start_addr = buf->dma_addr;
+	end_addr = buf->dma_addr + buf->size;
+
+	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
+	if (!buf->pages) {
+		DRM_ERROR("failed to allocate pages.\n");
+		ret = -ENOMEM;
+		goto err2;
+	}
+
+	sgl = buf->sgt->sgl;
+
+	while (i < npages) {
+		buf->pages[i] = phys_to_page(start_addr);
+		sg_set_page(sgl, buf->pages[i], page_size, 0);
+		sg_dma_address(sgl) = start_addr;
+		start_addr += page_size;
+		if (end_addr - start_addr < page_size)
+			break;
+		sgl = sg_next(sgl);
+		i++;
+	}
+
+	buf->pages[i+1] = phys_to_page(start_addr);
+
+	sgl = sg_next(sgl);
+	sg_set_page(sgl, buf->pages[i+1], end_addr - start_addr, 0);
+
+	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)buf->kvaddr,
+			(unsigned long)buf->dma_addr,
+			buf->size);
+
+	return ret;
+err2:
+	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
+			(dma_addr_t)buf->dma_addr);
+	buf->dma_addr = (dma_addr_t)NULL;
+err1:
+	sg_free_table(buf->sgt);
+	kfree(buf->sgt);
+	buf->sgt = NULL;
+
+	return ret;
 }
 
 static void lowlevel_buffer_deallocate(struct drm_device *dev,
-		struct exynos_drm_gem_buf *buffer)
+		unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
 	DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-	if (buffer->dma_addr && buffer->size)
-		dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
-				(dma_addr_t)buffer->dma_addr);
-	else
-		DRM_DEBUG_KMS("buffer data are invalid.\n");
+	/*
+	 * Release only physically contiguous memory here;
+	 * non-contiguous memory is released by the exynos
+	 * gem framework instead.
+	 */
+	if (flags & EXYNOS_BO_NONCONTIG) {
+		DRM_DEBUG_KMS("unsupported allocation type.\n");
+		return;
+	}
+
+	if (!buf->dma_addr) {
+		DRM_DEBUG_KMS("dma_addr is invalid.\n");
+		return;
+	}
+
+	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)buf->kvaddr,
+			(unsigned long)buf->dma_addr,
+			buf->size);
+
+	sg_free_table(buf->sgt);
+
+	kfree(buf->sgt);
+	buf->sgt = NULL;
+
+	kfree(buf->pages);
+	buf->pages = NULL;
+
+	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
+				(dma_addr_t)buf->dma_addr);
+	buf->dma_addr = (dma_addr_t)NULL;
 }
 
-struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
-		unsigned int size)
+struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
+						unsigned int size)
 {
 	struct exynos_drm_gem_buf *buffer;
 
@@ -77,21 +193,11 @@
 	}
 
 	buffer->size = size;
-
-	/*
-	 * allocate memory region with size and set the memory information
-	 * to vaddr and dma_addr of a buffer object.
-	 */
-	if (lowlevel_buffer_allocate(dev, buffer) < 0) {
-		kfree(buffer);
-		return NULL;
-	}
-
 	return buffer;
 }
 
-void exynos_drm_buf_destroy(struct drm_device *dev,
-		struct exynos_drm_gem_buf *buffer)
+void exynos_drm_fini_buf(struct drm_device *dev,
+				struct exynos_drm_gem_buf *buffer)
 {
 	DRM_DEBUG_KMS("%s.\n", __FILE__);
 
@@ -100,12 +206,27 @@
 		return;
 	}
 
-	lowlevel_buffer_deallocate(dev, buffer);
-
 	kfree(buffer);
 	buffer = NULL;
 }
 
-MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DRM Buffer Management Module");
-MODULE_LICENSE("GPL");
+int exynos_drm_alloc_buf(struct drm_device *dev,
+		struct exynos_drm_gem_buf *buf, unsigned int flags)
+{
+
+	/*
+	 * allocate memory region and set the memory information
+	 * to vaddr and dma_addr of a buffer object.
+	 */
+	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void exynos_drm_free_buf(struct drm_device *dev,
+		unsigned int flags, struct exynos_drm_gem_buf *buffer)
+{
+
+	lowlevel_buffer_deallocate(dev, flags, buffer);
+}
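
Worked example for the chunk sizing in lowlevel_buffer_allocate() above
(assuming ARM's 1 MiB SECTION_SIZE): a 1.5 MiB (0x180000 byte) request takes
the SZ_1M branch, so page_size = 1 MiB and npages = (0x180000 >> 20) + 1 = 2,
and the resulting sg table holds one full-size entry plus a tail entry:

    entry 0: page @ dma_addr,          length 1 MiB
    entry 1: page @ dma_addr + 1 MiB,  length 0.5 MiB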
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index c913f2b..3388e4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -26,12 +26,22 @@
 #ifndef _EXYNOS_DRM_BUF_H_
 #define _EXYNOS_DRM_BUF_H_
 
-/* allocate physical memory. */
-struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
-		unsigned int size);
+/* create and initialize buffer object. */
+struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
+						unsigned int size);
 
-/* remove allocated physical memory. */
-void exynos_drm_buf_destroy(struct drm_device *dev,
-		struct exynos_drm_gem_buf *buffer);
+/* destroy buffer object. */
+void exynos_drm_fini_buf(struct drm_device *dev,
+				struct exynos_drm_gem_buf *buffer);
+
+/* allocate physical memory region and setup sgt and pages. */
+int exynos_drm_alloc_buf(struct drm_device *dev,
+				struct exynos_drm_gem_buf *buf,
+				unsigned int flags);
+
+/* release physical memory region, sgt and pages. */
+void exynos_drm_free_buf(struct drm_device *dev,
+				unsigned int flags,
+				struct exynos_drm_gem_buf *buffer);
 
 #endif
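
For reference, a minimal sketch (not part of the patch) of how the four
helpers above pair up for the contiguous case, mirroring what
exynos_drm_gem_create() does; error handling is abbreviated and the function
name is made up:

    static struct exynos_drm_gem_buf *example_contig_alloc(struct drm_device *dev,
                                                            unsigned int size)
    {
            struct exynos_drm_gem_buf *buf;

            /* create the buffer object; no backing memory is allocated yet. */
            buf = exynos_drm_init_buf(dev, size);
            if (!buf)
                    return NULL;

            /* allocate contiguous backing memory plus its sgt and pages. */
            if (exynos_drm_alloc_buf(dev, buf, 0) < 0) {
                    exynos_drm_fini_buf(dev, buf);
                    return NULL;
            }

            return buf;
    }

Teardown runs in the opposite order: exynos_drm_free_buf() releases the
backing memory, sgt and pages, and exynos_drm_fini_buf() then frees the
buffer object itself.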
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 54f8f07..b5dcd53 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -125,7 +125,9 @@
 	}
 
 	size = mode_cmd.pitches[0] * mode_cmd.height;
-	exynos_gem_obj = exynos_drm_gem_create(dev, size);
+
+	/* 0 means physically contiguous memory */
+	exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
 	if (IS_ERR(exynos_gem_obj)) {
 		ret = PTR_ERR(exynos_gem_obj);
 		goto out;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 6545251..730093f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -26,6 +26,7 @@
 #include "drmP.h"
 #include "drm.h"
 
+#include <linux/shmem_fs.h>
 #include <drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
@@ -55,6 +56,178 @@
 	return out_msg;
 }
 
+static unsigned int mask_gem_flags(unsigned int flags)
+{
+	return flags &= EXYNOS_BO_NONCONTIG;
+}
+
+static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
+						gfp_t gfpmask)
+{
+	struct inode *inode;
+	struct address_space *mapping;
+	struct page *p, **pages;
+	int i, npages;
+
+	/* This is the shared memory object that backs the GEM resource */
+	inode = obj->filp->f_path.dentry->d_inode;
+	mapping = inode->i_mapping;
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (pages == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	gfpmask |= mapping_gfp_mask(mapping);
+
+	for (i = 0; i < npages; i++) {
+		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		if (IS_ERR(p))
+			goto fail;
+		pages[i] = p;
+	}
+
+	return pages;
+
+fail:
+	while (i--)
+		page_cache_release(pages[i]);
+
+	drm_free_large(pages);
+	return ERR_PTR(PTR_ERR(p));
+}
+
+static void exynos_gem_put_pages(struct drm_gem_object *obj,
+					struct page **pages,
+					bool dirty, bool accessed)
+{
+	int i, npages;
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	for (i = 0; i < npages; i++) {
+		if (dirty)
+			set_page_dirty(pages[i]);
+
+		if (accessed)
+			mark_page_accessed(pages[i]);
+
+		/* Undo the reference we took when populating the table */
+		page_cache_release(pages[i]);
+	}
+
+	drm_free_large(pages);
+}
+
+static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+					struct vm_area_struct *vma,
+					unsigned long f_vaddr,
+					pgoff_t page_offset)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+	unsigned long pfn;
+
+	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+		unsigned long usize = buf->size;
+
+		if (!buf->pages)
+			return -EINTR;
+
+		while (usize > 0) {
+			pfn = page_to_pfn(buf->pages[page_offset++]);
+			vm_insert_mixed(vma, f_vaddr, pfn);
+			f_vaddr += PAGE_SIZE;
+			usize -= PAGE_SIZE;
+		}
+
+		return 0;
+	}
+
+	pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
+
+	return vm_insert_mixed(vma, f_vaddr, pfn);
+}
+
+static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+	struct scatterlist *sgl;
+	struct page **pages;
+	unsigned int npages, i = 0;
+	int ret;
+
+	if (buf->pages) {
+		DRM_DEBUG_KMS("already allocated.\n");
+		return -EINVAL;
+	}
+
+	pages = exynos_gem_get_pages(obj, GFP_KERNEL);
+	if (IS_ERR(pages)) {
+		DRM_ERROR("failed to get pages.\n");
+		return PTR_ERR(pages);
+	}
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!buf->sgt) {
+		DRM_ERROR("failed to allocate sg table.\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
+	if (ret < 0) {
+		DRM_ERROR("failed to initialize sg table.\n");
+		ret = -EFAULT;
+		goto err1;
+	}
+
+	sgl = buf->sgt->sgl;
+
+	/* add all pages to the sg list. */
+	while (i < npages) {
+		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
+		sg_dma_address(sgl) = page_to_phys(pages[i]);
+		i++;
+		sgl = sg_next(sgl);
+	}
+
+	/* TODO: handle the UNCACHED memory type here. */
+
+	buf->pages = pages;
+	return ret;
+err1:
+	kfree(buf->sgt);
+	buf->sgt = NULL;
+err:
+	exynos_gem_put_pages(obj, pages, true, false);
+	return ret;
+
+}
+
+static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+
+	/*
+	 * If the buffer type is EXYNOS_BO_NONCONTIG, release all pages
+	 * allocated by the gem fault handler.
+	 */
+	sg_free_table(buf->sgt);
+	kfree(buf->sgt);
+	buf->sgt = NULL;
+
+	exynos_gem_put_pages(obj, buf->pages, true, false);
+	buf->pages = NULL;
+
+	/* TODO: handle the UNCACHED memory type here. */
+}
+
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
 					struct drm_file *file_priv,
 					unsigned int *handle)
@@ -90,7 +263,15 @@
 
 	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
 
-	exynos_drm_buf_destroy(obj->dev, exynos_gem_obj->buffer);
+	if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
+			exynos_gem_obj->buffer->pages)
+		exynos_drm_gem_put_pages(obj);
+	else
+		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
+					exynos_gem_obj->buffer);
+
+	exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
+	exynos_gem_obj->buffer = NULL;
 
 	if (obj->map_list.map)
 		drm_gem_free_mmap_offset(obj);
@@ -99,6 +280,7 @@
 	drm_gem_object_release(obj);
 
 	kfree(exynos_gem_obj);
+	exynos_gem_obj = NULL;
 }
 
 static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
@@ -114,6 +296,7 @@
 		return NULL;
 	}
 
+	exynos_gem_obj->size = size;
 	obj = &exynos_gem_obj->base;
 
 	ret = drm_gem_object_init(dev, obj, size);
@@ -129,27 +312,55 @@
 }
 
 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
-						 unsigned long size)
+						unsigned int flags,
+						unsigned long size)
 {
-	struct exynos_drm_gem_buf *buffer;
 	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_gem_buf *buf;
+	int ret;
 
 	size = roundup(size, PAGE_SIZE);
 	DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
 
-	buffer = exynos_drm_buf_create(dev, size);
-	if (!buffer)
+	flags = mask_gem_flags(flags);
+
+	buf = exynos_drm_init_buf(dev, size);
+	if (!buf)
 		return ERR_PTR(-ENOMEM);
 
 	exynos_gem_obj = exynos_drm_gem_init(dev, size);
 	if (!exynos_gem_obj) {
-		exynos_drm_buf_destroy(dev, buffer);
-		return ERR_PTR(-ENOMEM);
+		ret = -ENOMEM;
+		goto err;
 	}
 
-	exynos_gem_obj->buffer = buffer;
+	exynos_gem_obj->buffer = buf;
+
+	/* set memory type and cache attribute from user side. */
+	exynos_gem_obj->flags = flags;
+
+	/*
+	 * Allocate all the pages up front if the user asked for
+	 * physically non-contiguous memory.
+	 */
+	if (flags & EXYNOS_BO_NONCONTIG) {
+		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
+		if (ret < 0) {
+			drm_gem_object_release(&exynos_gem_obj->base);
+			goto err;
+		}
+	} else {
+		ret = exynos_drm_alloc_buf(dev, buf, flags);
+		if (ret < 0) {
+			drm_gem_object_release(&exynos_gem_obj->base);
+			goto err;
+		}
+	}
 
 	return exynos_gem_obj;
+err:
+	exynos_drm_fini_buf(dev, buf);
+	return ERR_PTR(ret);
 }
 
 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -161,7 +372,7 @@
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
-	exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
+	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
 	if (IS_ERR(exynos_gem_obj))
 		return PTR_ERR(exynos_gem_obj);
 
@@ -200,7 +411,8 @@
 	struct drm_gem_object *obj = filp->private_data;
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 	struct exynos_drm_gem_buf *buffer;
-	unsigned long pfn, vm_size;
+	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+	int ret;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -209,7 +421,8 @@
 	/* in case of direct mapping, always having non-cachable attribute */
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	vm_size = vma->vm_end - vma->vm_start;
+	vm_size = usize = vma->vm_end - vma->vm_start;
+
 	/*
 	 * a buffer contains information to physically continuous memory
 	 * allocated by user request or at framebuffer creation.
@@ -220,18 +433,37 @@
 	if (vm_size > buffer->size)
 		return -EINVAL;
 
-	/*
-	 * get page frame number to physical memory to be mapped
-	 * to user space.
-	 */
-	pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
+	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+		int i = 0;
 
-	DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
+		if (!buffer->pages)
+			return -EINVAL;
 
-	if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
-				vma->vm_page_prot)) {
-		DRM_ERROR("failed to remap pfn range.\n");
-		return -EAGAIN;
+		do {
+			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
+			if (ret) {
+				DRM_ERROR("failed to remap user space.\n");
+				return ret;
+			}
+
+			uaddr += PAGE_SIZE;
+			usize -= PAGE_SIZE;
+		} while (usize > 0);
+	} else {
+		/*
+		 * get page frame number to physical memory to be mapped
+		 * to user space.
+		 */
+		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+								PAGE_SHIFT;
+
+		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
+
+		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
+					vma->vm_page_prot)) {
+			DRM_ERROR("failed to remap pfn range.\n");
+			return -EAGAIN;
+		}
 	}
 
 	return 0;
@@ -311,9 +543,9 @@
 	 */
 
 	args->pitch = args->width * args->bpp >> 3;
-	args->size = args->pitch * args->height;
+	args->size = PAGE_ALIGN(args->pitch * args->height);
 
-	exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
+	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
 	if (IS_ERR(exynos_gem_obj))
 		return PTR_ERR(exynos_gem_obj);
 
@@ -397,20 +629,31 @@
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 	struct drm_device *dev = obj->dev;
-	unsigned long pfn;
+	unsigned long f_vaddr;
 	pgoff_t page_offset;
 	int ret;
 
 	page_offset = ((unsigned long)vmf->virtual_address -
 			vma->vm_start) >> PAGE_SHIFT;
+	f_vaddr = (unsigned long)vmf->virtual_address;
 
 	mutex_lock(&dev->struct_mutex);
 
-	pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
-			PAGE_SHIFT) + page_offset;
+	/*
+	 * Allocate all the pages if the buffer was created as
+	 * physically non-contiguous memory.
+	 */
+	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
+		ret = exynos_drm_gem_get_pages(obj);
+		if (ret < 0)
+			goto err;
+	}
 
-	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+	if (ret < 0)
+		DRM_ERROR("failed to map pages.\n");
 
+err:
 	mutex_unlock(&dev->struct_mutex);
 
 	return convert_to_vm_err_msg(ret);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 67cdc91..096267d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -36,11 +36,15 @@
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *	- this address could be physical address without IOMMU and
  *	device address with IOMMU.
+ * @sgt: sg table describing the pages of the allocated memory region.
+ * @pages: array of all pages backing the allocated memory region.
  * @size: size of allocated memory region.
  */
 struct exynos_drm_gem_buf {
 	void __iomem		*kvaddr;
 	dma_addr_t		dma_addr;
+	struct sg_table		*sgt;
+	struct page		**pages;
 	unsigned long		size;
 };
 
@@ -55,6 +59,8 @@
  *	by user request or at framebuffer creation.
  *	continuous memory region allocated by user request
  *	or at framebuffer creation.
+ * @size: total size of the physically non-contiguous memory region.
+ * @flags: memory type of the allocated buffer and its cache attribute.
  *
  * P.S. this object would be transfered to user as kms_bo.handle so
  *	user can access the buffer through kms_bo.handle.
@@ -62,6 +68,8 @@
 struct exynos_drm_gem_obj {
 	struct drm_gem_object		base;
 	struct exynos_drm_gem_buf	*buffer;
+	unsigned long			size;
+	unsigned int			flags;
 };
 
 /* destroy a buffer with gem object */
@@ -69,7 +77,8 @@
 
 /* create a new buffer with gem object */
 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
-						 unsigned long size);
+						unsigned int flags,
+						unsigned long size);
 
 /*
  * request gem object creation and buffer allocation as the size
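
As an aside (not part of this patch), the sg table attached to each buffer is
what allows an IOMMU-aware device to map the allocation later. A rough sketch
using the generic DMA API; the helper name is purely illustrative:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* hypothetical helper: map a gem buffer's pages for DMA (through the IOMMU if present). */
    static int exynos_gem_buf_map_to_device(struct device *dev,
                                            struct exynos_drm_gem_buf *buf)
    {
            int nents;

            nents = dma_map_sg(dev, buf->sgt->sgl, buf->sgt->nents,
                               DMA_BIDIRECTIONAL);
            if (!nents)
                    return -EIO;

            /* sg_dma_address() of each entry now holds a device address. */
            return 0;
    }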
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index aff2313..81c9cb7 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -79,6 +79,12 @@
 	__s32 zpos;
 };
 
+/* memory type definitions. */
+enum e_drm_exynos_gem_mem_type {
+	/* Physically Non-Contiguous memory. */
+	EXYNOS_BO_NONCONTIG	= 1 << 0
+};
+
 #define DRM_EXYNOS_GEM_CREATE		0x00
 #define DRM_EXYNOS_GEM_MAP_OFFSET	0x01
 #define DRM_EXYNOS_GEM_MMAP		0x02