/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
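
/*
 * A worked example of the arithmetic above, assuming the common 4 KiB page
 * size (PAGE_SHIFT == 12): on 64-bit, fake offsets start at page 0x100000
 * (the first page above the 4 GiB mark, so they can never collide with a
 * 32-bit physical offset) and span roughly 16M pages (~64 GiB of offset
 * space).  On 32-bit, they start at page 0x10000 (the 256 MiB mark) and span
 * roughly 1M pages (~4 GiB), which still fits comfortably in the unsigned
 * long vm_pgoff.
 */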

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);

        vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
        kfree(dev->vma_offset_manager);
        dev->vma_offset_manager = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
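
/*
 * A minimal sketch of how a driver typically pairs drm_gem_object_init()
 * with drm_gem_handle_create().  Illustrative only and kept out of the
 * build; struct foo_bo and foo_gem_create() are hypothetical names, not
 * part of this file.
 */
#if 0
struct foo_bo {
        struct drm_gem_object base;
        /* driver-private state would follow */
};

static int foo_gem_create(struct drm_device *dev, struct drm_file *file,
                          size_t size, u32 *handle)
{
        struct foo_bo *bo;
        int ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return -ENOMEM;

        /* Size must be page-aligned or drm_gem_private_object_init() BUGs. */
        ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
        if (ret) {
                kfree(bo);
                return ret;
        }

        /* The handle takes its own reference on success ... */
        ret = drm_gem_handle_create(file, &bo->base, handle);

        /* ... so the creation reference can be dropped either way. */
        drm_gem_object_unreference_unlocked(&bo->base);

        return ret;
}
#endif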

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
        /*
         * Note: obj->dma_buf can't disappear as long as we still hold a
         * handle reference in obj->handle_count.
         */
        mutex_lock(&filp->prime.lock);
        if (obj->dma_buf) {
                drm_prime_remove_buf_handle_locked(&filp->prime,
                                                   obj->dma_buf);
        }
        mutex_unlock(&filp->prime.lock);
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
         * Must hold object_name_lock while dropping what may be the last
         * handle: the flink name (and any exported dma_buf reference) has
         * to be torn down before the object itself can disappear.
         */
        mutex_lock(&obj->dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
        }
        mutex_unlock(&obj->dev->object_name_lock);

        drm_gem_object_unreference_unlocked(obj);
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_device *dev;
        struct drm_gem_object *obj;

        /* This is gross. The idr system doesn't let us try a delete and
         * return an error code.  It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later.  Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return -EINVAL;
        }
        dev = obj->dev;

        /* Release reference and decrement refcount. */
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_gem_remove_prime_handles(obj, filp);
        drm_vma_node_revoke(&obj->vma_node, filp->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, filp);
        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));

        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
        drm_gem_object_reference(obj);
        obj->handle_count++;
        spin_unlock(&file_priv->table_lock);
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        if (ret < 0) {
                drm_gem_object_handle_unreference_unlocked(obj);
                return ret;
        }
        *handlep = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
        if (ret) {
                drm_gem_handle_delete(file_priv, *handlep);
                return ret;
        }

        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret) {
                        drm_gem_handle_delete(file_priv, *handlep);
                        return ret;
                }
        }

        return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their reference to the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
                      struct drm_gem_object *obj,
                      u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
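
/*
 * A minimal sketch of the usual consumer of drm_gem_create_mmap_offset():
 * a driver's ->dumb_map_offset callback, which hands the fake offset back
 * to userspace for a later mmap(2).  Kept out of the build;
 * foo_gem_map_offset() is a hypothetical name.
 */
#if 0
static int foo_gem_map_offset(struct drm_file *file, struct drm_device *dev,
                              u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(dev, file, handle);
        if (!obj)
                return -ENOENT;

        ret = drm_gem_create_mmap_offset(obj);
        if (!ret)
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}
#endif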

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
        struct inode *inode;
        struct address_space *mapping;
        struct page *p, **pages;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        inode = file_inode(obj->filp);
        mapping = inode->i_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        gfpmask |= mapping_gfp_mask(mapping);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* There is a hypothetical issue w/ drivers that require
                 * buffer memory in the low 4GB.. if the pages are un-
                 * pinned, and swapped out, they can end up swapped back
                 * in above 4GB.  If pages are already in memory, then
                 * shmem_read_mapping_page_gfp will ignore the gfpmask,
                 * even if the already in-memory page disobeys the mask.
                 *
                 * It is only a theoretical issue today, because none of
                 * the devices with this limitation can be populated with
                 * enough memory to trigger the issue.  But this BUG_ON()
                 * is here as a reminder in case the problem with
                 * shmem_read_mapping_page_gfp() isn't solved by the time
                 * it does become a real issue.
                 *
                 * See this thread: http://lkml.org/lkml/2011/7/11/238
                 */
                BUG_ON((gfpmask & __GFP_DMA32) &&
                       (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        while (i--)
                page_cache_release(pages[i]);

        drm_free_large(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed)
{
        int i, npages;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                page_cache_release(pages[i]);
        }

        drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
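
/*
 * A minimal sketch of how the two helpers above pair up in a driver's
 * pin/unpin path.  Kept out of the build; foo_bo, its pages field,
 * foo_bo_get_pages() and foo_bo_put_pages() are hypothetical.
 */
#if 0
static int foo_bo_get_pages(struct foo_bo *bo)
{
        struct page **pages;

        pages = drm_gem_get_pages(&bo->base, GFP_KERNEL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        bo->pages = pages;
        return 0;
}

static void foo_bo_put_pages(struct foo_bo *bo)
{
        /* Mark dirty so swapped-out contents aren't silently lost. */
        drm_gem_put_pages(&bo->base, bo->pages, true, false);
        bo->pages = NULL;
}
#endif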

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
                      u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return NULL;
        }

        drm_gem_object_reference(obj);

        spin_unlock(&filp->table_lock);

        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
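
/*
 * A minimal sketch of the lookup/use/unreference pattern most driver ioctls
 * follow.  Kept out of the build; struct foo_gem_busy, foo_gem_busy_ioctl()
 * and foo_bo_is_busy() are hypothetical.
 */
#if 0
static int foo_gem_busy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file)
{
        struct foo_gem_busy *args = data;
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = foo_bo_is_busy(obj);

        /* Drop the reference the lookup took. */
        drm_gem_object_unreference_unlocked(obj);

        return ret;
}
#endif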

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        idr_preload(GFP_KERNEL);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_reference(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}
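
/*
 * For reference, a sketch (not taken from any specific library) of how
 * userspace typically consumes the two ioctls above: process A names a
 * buffer with FLINK and passes the name to process B, which turns it back
 * into a handle with OPEN.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *	// send flink.name to process B out of band
 *
 *	struct drm_gem_open open_args = { .name = name_from_a };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_args);
 *	// open_args.handle and open_args.size are now valid in process B
 *
 * Note that flink names are global to the device, which is one reason
 * dma-buf file descriptors are generally preferred for buffer sharing.
 */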

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init(&file_private->object_idr);
        spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        if (obj->filp)
                fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
Luca Barbieric3ae90c2010-02-09 05:49:11 +0000698
Eric Anholt673a3942008-07-30 12:06:12 -0700699/**
700 * Called after the last reference to the object has been lost.
Luca Barbieric3ae90c2010-02-09 05:49:11 +0000701 * Must be called holding struct_ mutex
Eric Anholt673a3942008-07-30 12:06:12 -0700702 *
703 * Frees the object
704 */
705void
706drm_gem_object_free(struct kref *kref)
707{
708 struct drm_gem_object *obj = (struct drm_gem_object *) kref;
709 struct drm_device *dev = obj->dev;
710
711 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
712
713 if (dev->driver->gem_free_object != NULL)
714 dev->driver->gem_free_object(obj);
Eric Anholt673a3942008-07-30 12:06:12 -0700715}
716EXPORT_SYMBOL(drm_gem_object_free);
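
/*
 * A minimal sketch of the driver ->gem_free_object callback that
 * drm_gem_object_free() ends up invoking.  Kept out of the build;
 * foo_gem_free_object() is a hypothetical name, and a real driver would
 * also release its private state (pages, GPU mappings, ...) here.
 */
#if 0
static void foo_gem_free_object(struct drm_gem_object *obj)
{
        struct foo_bo *bo = container_of(obj, struct foo_bo, base);

        drm_gem_free_mmap_offset(obj);
        drm_gem_object_release(obj);
        kfree(bo);
}
#endif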

void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_reference(obj);

        mutex_lock(&obj->dev->struct_mutex);
        drm_vm_open_locked(obj->dev, vma);
        mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(obj->dev, vma);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
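
/*
 * A minimal sketch of how a driver wires these helpers into its
 * vm_operations_struct; only the fault handler is driver-specific.
 * Kept out of the build; foo_gem_fault() is a hypothetical name.
 */
#if 0
static const struct vm_operations_struct foo_gem_vm_ops = {
        .fault = foo_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
#endif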
741
Laurent Pinchart1c5aafa2013-04-16 14:14:52 +0200742/**
743 * drm_gem_mmap_obj - memory map a GEM object
744 * @obj: the GEM object to map
745 * @obj_size: the object size to be mapped, in bytes
746 * @vma: VMA for the area to be mapped
747 *
748 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
749 * provided by the driver. Depending on their requirements, drivers can either
750 * provide a fault handler in their gem_vm_ops (in which case any accesses to
751 * the object will be trapped, to perform migration, GTT binding, surface
752 * register allocation, or performance monitoring), or mmap the buffer memory
753 * synchronously after calling drm_gem_mmap_obj.
754 *
755 * This function is mainly intended to implement the DMABUF mmap operation, when
756 * the GEM object is not looked up based on its fake offset. To implement the
757 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
758 *
David Herrmannca481c92013-08-25 18:28:58 +0200759 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
760 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
761 * callers must verify access restrictions before calling this helper.
762 *
YoungJun Cho4368dd82013-06-27 08:39:58 +0900763 * NOTE: This function has to be protected with dev->struct_mutex
764 *
Laurent Pinchart1c5aafa2013-04-16 14:14:52 +0200765 * Return 0 or success or -EINVAL if the object size is smaller than the VMA
766 * size, or if no gem_vm_ops are provided.
767 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        struct drm_device *dev = obj->dev;

        lockdep_assert_held(&dev->struct_mutex);

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!dev->driver->gem_vm_ops)
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = dev->driver->gem_vm_ops;
        vma->vm_private_data = obj;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_reference(obj);

        drm_vm_open_locked(dev, vma);
        return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
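
/*
 * A minimal sketch of the dma-buf use case mentioned above: a driver's
 * prime mmap callback mapping an exported object without going through the
 * fake-offset lookup.  Kept out of the build; foo_gem_prime_mmap() is a
 * hypothetical name, and the exact callback a driver hangs this off varies.
 */
#if 0
static int foo_gem_prime_mmap(struct drm_gem_object *obj,
                              struct vm_area_struct *vma)
{
        int ret;

        /* drm_gem_mmap_obj() must be called under struct_mutex. */
        mutex_lock(&obj->dev->struct_mutex);
        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        mutex_unlock(&obj->dev->struct_mutex);

        return ret;
}
#endif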

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj;
        struct drm_vma_offset_node *node;
        int ret = 0;

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);

        node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
                                           vma->vm_pgoff,
                                           vma_pages(vma));
        if (!node) {
                mutex_unlock(&dev->struct_mutex);
                return drm_mmap(filp, vma);
        } else if (!drm_vma_node_is_allowed(node, filp)) {
                mutex_unlock(&dev->struct_mutex);
                return -EACCES;
        }

        obj = container_of(node, struct drm_gem_object, vma_node);
        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
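
/*
 * A minimal sketch of how a driver exposes this routine: drm_gem_mmap() is
 * simply wired in as the ->mmap handler of the driver's file_operations.
 * Kept out of the build; foo_driver_fops is a hypothetical name.
 */
#if 0
static const struct file_operations foo_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = drm_gem_mmap,
        .poll = drm_poll,
        .read = drm_read,
};
#endif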