/*
 * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/* temporary copy of drm_gem_{get,put}_pages() until the
 * "drm/gem: add functions to get/put pages" patch is merged..
 */

24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/shmem_fs.h>
27
28#include <drm/drmP.h>
29
30/**
31 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
32 * @obj: obj in question
33 * @gfpmask: gfp mask of requested pages
34 */
YAMANE Toshiakib1ca0792012-11-14 19:40:29 +090035struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
Rob Clarkcd5351f2011-11-12 12:09:40 -060036{
37 struct inode *inode;
38 struct address_space *mapping;
39 struct page *p, **pages;
40 int i, npages;
41
42 /* This is the shared memory object that backs the GEM resource */
Al Viro496ad9a2013-01-23 17:07:38 -050043 inode = file_inode(obj->filp);
Rob Clarkcd5351f2011-11-12 12:09:40 -060044 mapping = inode->i_mapping;
45
46 npages = obj->size >> PAGE_SHIFT;
47
48 pages = drm_malloc_ab(npages, sizeof(struct page *));
49 if (pages == NULL)
50 return ERR_PTR(-ENOMEM);
51
52 gfpmask |= mapping_gfp_mask(mapping);
53
54 for (i = 0; i < npages; i++) {
55 p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
56 if (IS_ERR(p))
57 goto fail;
58 pages[i] = p;
59
60 /* There is a hypothetical issue w/ drivers that require
61 * buffer memory in the low 4GB.. if the pages are un-
62 * pinned, and swapped out, they can end up swapped back
63 * in above 4GB. If pages are already in memory, then
64 * shmem_read_mapping_page_gfp will ignore the gfpmask,
65 * even if the already in-memory page disobeys the mask.
66 *
67 * It is only a theoretical issue today, because none of
68 * the devices with this limitation can be populated with
69 * enough memory to trigger the issue. But this BUG_ON()
70 * is here as a reminder in case the problem with
71 * shmem_read_mapping_page_gfp() isn't solved by the time
72 * it does become a real issue.
73 *
74 * See this thread: http://lkml.org/lkml/2011/7/11/238
75 */
76 BUG_ON((gfpmask & __GFP_DMA32) &&
77 (page_to_pfn(p) >= 0x00100000UL));
78 }
79
80 return pages;
81
82fail:
YAMANE Toshiaki4a193132012-11-14 19:40:43 +090083 while (i--)
Rob Clarkcd5351f2011-11-12 12:09:40 -060084 page_cache_release(pages[i]);
YAMANE Toshiaki4a193132012-11-14 19:40:43 +090085
Rob Clarkcd5351f2011-11-12 12:09:40 -060086 drm_free_large(pages);
Thomas Meyer37fe58b2012-01-22 18:27:21 +010087 return ERR_CAST(p);
Rob Clarkcd5351f2011-11-12 12:09:40 -060088}
89
90/**
91 * drm_gem_put_pages - helper to free backing pages for a GEM object
92 * @obj: obj in question
93 * @pages: pages to free
94 */
95void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
96 bool dirty, bool accessed)
97{
98 int i, npages;
99
100 npages = obj->size >> PAGE_SHIFT;
101
102 for (i = 0; i < npages; i++) {
103 if (dirty)
104 set_page_dirty(pages[i]);
105
106 if (accessed)
107 mark_page_accessed(pages[i]);
108
109 /* Undo the reference we took when populating the table */
110 page_cache_release(pages[i]);
111 }
112
113 drm_free_large(pages);
114}
Rob Clarkf7f9f452011-12-05 19:19:22 -0600115
116int
117_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
118{
119 struct drm_device *dev = obj->dev;
120 struct drm_gem_mm *mm = dev->mm_private;
Rob Clarkf7f9f452011-12-05 19:19:22 -0600121
David Herrmann0de23972013-07-24 21:07:52 +0200122 return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
123 size / PAGE_SIZE);
Rob Clarkf7f9f452011-12-05 19:19:22 -0600124}