blob: 09c6bfb86a66c6f12eefcfeb928a88e5b88be643 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
3 *
4 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
5 * All Rights Reserved.
6 *
Thierry Redingc6a1af8a2014-05-19 13:39:07 +02007 * Author Rickard E. (Rik) Faith <faith@valinux.com>
8 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 */
28
David Herrmann1b7199f2014-07-23 12:29:56 +020029#include <linux/debugfs.h>
David Herrmann31bbe162014-01-03 14:09:47 +010030#include <linux/fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <linux/module.h>
32#include <linux/moduleparam.h>
David Herrmann31bbe162014-01-03 14:09:47 +010033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
David Howells760285e2012-10-02 18:01:07 +010035#include <drm/drmP.h>
36#include <drm/drm_core.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
unsigned int drm_debug = 0;	/* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

/* Delay before an idle vblank interrupt is auto-disabled, in msecs. */
unsigned int drm_vblank_offdelay = 5000;	/* Default to 5000 msecs. */

/* Maximum acceptable error on vblank timestamps, in usecs. */
unsigned int drm_timestamp_precision = 20;	/* Default to 20 usecs. */

/*
 * Default to use monotonic timestamps for wait-for-vblank and page-flip
 * complete events.
 */
unsigned int drm_timestamp_monotonic = 1;

MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");

module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);

/* Protects drm_minors_idr; taken with IRQs disabled (irqsave variants). */
static DEFINE_SPINLOCK(drm_minor_lock);
/* Maps minor numbers to struct drm_minor (NULL while minor unregistered). */
static struct idr drm_minors_idr;

struct class *drm_class;
/* Root dentry of the DRM debugfs hierarchy (/sys/kernel/debug/dri). */
static struct dentry *drm_debugfs_root;
Joe Perches5ad3d882011-04-17 20:35:51 -070069
70int drm_err(const char *func, const char *format, ...)
71{
72 struct va_format vaf;
73 va_list args;
74 int r;
75
76 va_start(args, format);
77
78 vaf.fmt = format;
79 vaf.va = &args;
80
81 r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
82
83 va_end(args);
84
85 return r;
86}
87EXPORT_SYMBOL(drm_err);
88
Lespiau, Damien1287aa92014-03-24 15:53:17 +000089void drm_ut_debug_printk(const char *function_name, const char *format, ...)
yakui_zhao4fefcb22009-06-02 14:09:47 +080090{
Daniel Vetterfffb9062013-11-17 22:25:02 +010091 struct va_format vaf;
yakui_zhao4fefcb22009-06-02 14:09:47 +080092 va_list args;
Linus Torvalds1da177e2005-04-16 15:20:36 -070093
Lespiau, Damiena73d4e92014-03-24 15:53:15 +000094 va_start(args, format);
95 vaf.fmt = format;
96 vaf.va = &args;
Daniel Vetterfffb9062013-11-17 22:25:02 +010097
Lespiau, Damien0ed02982014-03-24 15:53:18 +000098 printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
Lespiau, Damiena73d4e92014-03-24 15:53:15 +000099
100 va_end(args);
yakui_zhao4fefcb22009-06-02 14:09:47 +0800101}
102EXPORT_SYMBOL(drm_ut_debug_printk);
Joe Perches5ad3d882011-04-17 20:35:51 -0700103
Dave Airlie7c1c2872008-11-28 14:22:24 +1000104struct drm_master *drm_master_create(struct drm_minor *minor)
105{
106 struct drm_master *master;
107
Eric Anholt9a298b22009-03-24 12:23:04 -0700108 master = kzalloc(sizeof(*master), GFP_KERNEL);
Dave Airlie7c1c2872008-11-28 14:22:24 +1000109 if (!master)
110 return NULL;
111
112 kref_init(&master->refcount);
113 spin_lock_init(&master->lock.spinlock);
114 init_waitqueue_head(&master->lock.lock_queue);
Daniel Vetter10e68562014-04-05 11:12:04 +0200115 if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
116 kfree(master);
117 return NULL;
118 }
Dave Airlie7c1c2872008-11-28 14:22:24 +1000119 INIT_LIST_HEAD(&master->magicfree);
120 master->minor = minor;
121
Dave Airlie7c1c2872008-11-28 14:22:24 +1000122 return master;
123}
124
/* Take an additional reference on @master; caller must already hold one. */
struct drm_master *drm_master_get(struct drm_master *master)
{
	kref_get(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);
Dave Airlie7c1c2872008-11-28 14:22:24 +1000131
/*
 * drm_master_destroy - kref release callback for a master object
 *
 * Runs when the last reference is dropped (via drm_master_put()). Tears
 * down everything the master owned under dev->struct_mutex: driver state,
 * mappings, the unique name, and pending authentication magic tokens.
 */
static void drm_master_destroy(struct kref *kref)
{
	struct drm_master *master = container_of(kref, struct drm_master, refcount);
	struct drm_magic_entry *pt, *next;
	struct drm_device *dev = master->minor->dev;
	struct drm_map_list *r_list, *list_temp;

	mutex_lock(&dev->struct_mutex);
	/* Give the driver first shot at its per-master state. */
	if (dev->driver->master_destroy)
		dev->driver->master_destroy(dev, master);

	/* Drop every mapping owned by this master (safe-walk: entries freed). */
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}

	if (master->unique) {
		kfree(master->unique);
		master->unique = NULL;
		master->unique_len = 0;
	}

	/* Free outstanding auth tokens: unlink from list and hash, then free. */
	list_for_each_entry_safe(pt, next, &master->magicfree, head) {
		list_del(&pt->head);
		drm_ht_remove_item(&master->magiclist, &pt->hash_item);
		kfree(pt);
	}

	drm_ht_remove(&master->magiclist);

	mutex_unlock(&dev->struct_mutex);
	kfree(master);
}
167
/*
 * Drop a reference on *@master and clear the caller's pointer. The object
 * is destroyed via drm_master_destroy() when the count reaches zero.
 */
void drm_master_put(struct drm_master **master)
{
	kref_put(&(*master)->refcount, drm_master_destroy);
	*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
Dave Airlie7c1c2872008-11-28 14:22:24 +1000174
/*
 * drm_setmaster_ioctl - acquire DRM mastership for the calling file
 *
 * Re-attaches the caller's existing master object to its minor, making the
 * caller the active master. The whole transition is serialized by
 * dev->master_mutex. Returns 0 on success or if the caller is already
 * master; -EINVAL if someone else is master or the file has no master
 * object to re-acquire; otherwise the driver's master_set() error.
 */
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	int ret = 0;

	mutex_lock(&dev->master_mutex);
	/* Already the active master: nothing to do. */
	if (drm_is_master(file_priv))
		goto out_unlock;

	/* Another file currently holds mastership of this minor. */
	if (file_priv->minor->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Caller never had a master object, so there is nothing to restore. */
	if (!file_priv->master) {
		ret = -EINVAL;
		goto out_unlock;
	}

	file_priv->minor->master = drm_master_get(file_priv->master);
	if (dev->driver->master_set) {
		ret = dev->driver->master_set(dev, file_priv, false);
		/* Roll back the reference if the driver refused the switch. */
		if (unlikely(ret != 0))
			drm_master_put(&file_priv->minor->master);
	}

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}
205
/*
 * drm_dropmaster_ioctl - voluntarily drop DRM mastership
 *
 * Detaches the caller's master from its minor, notifying the driver via
 * master_drop() first. Serialized by dev->master_mutex. Returns -EINVAL
 * if the caller is not the active master or the minor has no master.
 */
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	int ret = -EINVAL;

	mutex_lock(&dev->master_mutex);
	/* Only the current master may drop mastership. */
	if (!drm_is_master(file_priv))
		goto out_unlock;

	if (!file_priv->minor->master)
		goto out_unlock;

	ret = 0;
	if (dev->driver->master_drop)
		dev->driver->master_drop(dev, file_priv, false);
	drm_master_put(&file_priv->minor->master);

out_unlock:
	mutex_unlock(&dev->master_mutex);
	return ret;
}
227
David Herrmann0d639882014-02-24 15:53:25 +0100228/*
229 * DRM Minors
230 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
231 * of them is represented by a drm_minor object. Depending on the capabilities
232 * of the device-driver, different interfaces are registered.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700233 *
David Herrmann0d639882014-02-24 15:53:25 +0100234 * Minors can be accessed via dev->$minor_name. This pointer is either
235 * NULL or a valid drm_minor pointer and stays valid as long as the device is
236 * valid. This means, DRM minors have the same life-time as the underlying
237 * device. However, this doesn't mean that the minor is active. Minors are
238 * registered and unregistered dynamically according to device-state.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239 */
David Herrmann0d639882014-02-24 15:53:25 +0100240
David Herrmann05b701f2014-01-29 12:43:56 +0100241static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
242 unsigned int type)
243{
244 switch (type) {
245 case DRM_MINOR_LEGACY:
246 return &dev->primary;
247 case DRM_MINOR_RENDER:
248 return &dev->render;
249 case DRM_MINOR_CONTROL:
250 return &dev->control;
251 default:
252 return NULL;
253 }
254}
255
/*
 * drm_minor_alloc - allocate a minor of @type for @dev
 *
 * Reserves a minor number and allocates the sysfs device, but does not
 * publish the minor: the IDR slot stays NULL until drm_minor_register().
 * Returns 0 on success, negative errno on failure.
 */
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	/*
	 * Reserve an ID in the per-type range [64*type, 64*(type+1)).
	 * idr_preload() lets the GFP_NOWAIT allocation inside the spinlock
	 * draw from a preallocated pool.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		goto err_free;

	minor->index = r;

	/* Device object is released via put_device() in drm_minor_free(). */
	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev)) {
		r = PTR_ERR(minor->kdev);
		goto err_index;
	}

	*drm_minor_get_slot(dev, type) = minor;
	return 0;

err_index:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
err_free:
	kfree(minor);
	return r;
}
301
/*
 * drm_minor_free - release the minor of @type on @dev
 *
 * Reverse of drm_minor_alloc(): drops the sysfs device reference, releases
 * the minor number and frees the object. No-op if the slot is empty.
 */
static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
	struct drm_minor **slot, *minor;
	unsigned long flags;

	slot = drm_minor_get_slot(dev, type);
	minor = *slot;
	if (!minor)
		return;

	drm_mode_group_destroy(&minor->mode_group);
	put_device(minor->kdev);

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	kfree(minor);
	*slot = NULL;
}
322
/*
 * drm_minor_register - publish a previously allocated minor
 *
 * Sets up debugfs, adds the sysfs device and finally makes the minor
 * visible to lookups by replacing its NULL IDR slot with the object.
 * Returns 0 on success (including when the slot was never allocated),
 * negative errno on failure.
 */
static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		return ret;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000357
/*
 * drm_minor_unregister - hide a registered minor again
 *
 * Reverse of drm_minor_register(): makes lookups fail by NULLing the IDR
 * slot, then removes the sysfs device and debugfs entries. No-op if the
 * minor was never registered.
 */
static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}
376
377/**
David Herrmann1616c522014-01-29 10:49:19 +0100378 * drm_minor_acquire - Acquire a DRM minor
379 * @minor_id: Minor ID of the DRM-minor
David Herrmannf73aca52013-10-20 18:55:40 +0200380 *
David Herrmann1616c522014-01-29 10:49:19 +0100381 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release this
383 * object with drm_minor_release().
384 *
385 * As long as you hold this minor, it is guaranteed that the object and the
386 * minor->dev pointer will stay valid! However, the device may get unplugged and
387 * unregistered while you hold the minor.
388 *
389 * Returns:
390 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
391 * failure.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392 */
David Herrmann1616c522014-01-29 10:49:19 +0100393struct drm_minor *drm_minor_acquire(unsigned int minor_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394{
David Herrmann1616c522014-01-29 10:49:19 +0100395 struct drm_minor *minor;
David Herrmann0d639882014-02-24 15:53:25 +0100396 unsigned long flags;
Eric Anholt673a3942008-07-30 12:06:12 -0700397
David Herrmann0d639882014-02-24 15:53:25 +0100398 spin_lock_irqsave(&drm_minor_lock, flags);
David Herrmann1616c522014-01-29 10:49:19 +0100399 minor = idr_find(&drm_minors_idr, minor_id);
David Herrmann0d639882014-02-24 15:53:25 +0100400 if (minor)
401 drm_dev_ref(minor->dev);
402 spin_unlock_irqrestore(&drm_minor_lock, flags);
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000403
David Herrmann0d639882014-02-24 15:53:25 +0100404 if (!minor) {
405 return ERR_PTR(-ENODEV);
406 } else if (drm_device_is_unplugged(minor->dev)) {
407 drm_dev_unref(minor->dev);
408 return ERR_PTR(-ENODEV);
409 }
410
David Herrmann1616c522014-01-29 10:49:19 +0100411 return minor;
412}
413
414/**
415 * drm_minor_release - Release DRM minor
416 * @minor: Pointer to DRM minor object
417 *
418 * Release a minor that was previously acquired via drm_minor_acquire().
419 */
void drm_minor_release(struct drm_minor *minor)
{
	/* Drops the device reference taken by drm_minor_acquire(). */
	drm_dev_unref(minor->dev);
}
Kristian Høgsberg112b7152009-01-04 16:55:33 -0500424
425/**
Thierry Redingc6a1af8a2014-05-19 13:39:07 +0200426 * drm_put_dev - Unregister and release a DRM device
427 * @dev: DRM device
428 *
429 * Called at module unload time or when a PCI device is unplugged.
430 *
431 * Use of this function is discouraged. It will eventually go away completely.
432 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
Kristian Høgsberg112b7152009-01-04 16:55:33 -0500433 *
434 * Cleans up all DRM device, calling drm_lastclose().
Kristian Høgsberg112b7152009-01-04 16:55:33 -0500435 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	/* Tolerate a NULL device: just complain and bail out. */
	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	/* Unregister first, then drop the caller's reference. */
	drm_dev_unregister(dev);
	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);
Dave Airlie2c07a212012-02-20 14:18:07 +0000449
450void drm_unplug_dev(struct drm_device *dev)
451{
452 /* for a USB device */
David Herrmannafcdbc82014-01-29 12:57:05 +0100453 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
454 drm_minor_unregister(dev, DRM_MINOR_RENDER);
455 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
Dave Airlie2c07a212012-02-20 14:18:07 +0000456
457 mutex_lock(&drm_global_mutex);
458
459 drm_device_set_unplugged(dev);
460
461 if (dev->open_count == 0) {
462 drm_put_dev(dev);
463 }
464 mutex_unlock(&drm_global_mutex);
465}
466EXPORT_SYMBOL(drm_unplug_dev);
David Herrmann1bb72532013-10-02 11:23:34 +0200467
David Herrmann31bbe162014-01-03 14:09:47 +0100468/*
469 * DRM internal mount
470 * We want to be able to allocate our own "struct address_space" to control
471 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
472 * stand-alone address_space objects, so we need an underlying inode. As there
473 * is no way to allocate an independent inode easily, we need a fake internal
474 * VFS mount-point.
475 *
476 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
477 * frees it again. You are allowed to use iget() and iput() to get references to
478 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
479 * drm_fs_inode_free() call (which does not have to be the last iput()).
480 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
481 * between multiple inode-users. You could, technically, call
482 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
483 * iput(), but this way you'd end up with a new vfsmount for each inode.
484 */
485
/* Pin count and mount of the internal "drm" pseudo filesystem. */
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
	.d_dname = simple_dname,
};

static const struct super_operations drm_fs_sops = {
	.statfs = simple_statfs,
};

/* Mount callback: a bare pseudo superblock; 0x010203ff is the fs magic. */
static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_pseudo(fs_type,
			    "drm:",
			    &drm_fs_sops,
			    &drm_fs_dops,
			    0x010203ff);
}

static struct file_system_type drm_fs_type = {
	.name = "drm",
	.owner = THIS_MODULE,
	.mount = drm_fs_mount,
	.kill_sb = kill_anon_super,
};
513
514static struct inode *drm_fs_inode_new(void)
515{
516 struct inode *inode;
517 int r;
518
519 r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
520 if (r < 0) {
521 DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
522 return ERR_PTR(r);
523 }
524
525 inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
526 if (IS_ERR(inode))
527 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
528
529 return inode;
530}
531
532static void drm_fs_inode_free(struct inode *inode)
533{
534 if (inode) {
535 iput(inode);
536 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
537 }
538}
539
David Herrmann1bb72532013-10-02 11:23:34 +0200540/**
Thierry Redingc6a1af8a2014-05-19 13:39:07 +0200541 * drm_dev_alloc - Allocate new DRM device
David Herrmann1bb72532013-10-02 11:23:34 +0200542 * @driver: DRM driver to allocate device for
543 * @parent: Parent device object
544 *
545 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
547 * with other core subsystems.
David Herrmann1bb72532013-10-02 11:23:34 +0200548 *
David Herrmann099d1c22014-01-29 10:21:36 +0100549 * The initial ref-count of the object is 1. Use drm_dev_ref() and
550 * drm_dev_unref() to take and drop further ref-counts.
551 *
David Herrmann1bb72532013-10-02 11:23:34 +0200552 * RETURNS:
553 * Pointer to new DRM device, or NULL if out of memory.
554 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	/* Initial reference; dropped by the caller via drm_dev_unref(). */
	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->buf_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);

	/* Backing inode for the device's address_space (mmap offsets). */
	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	/* Allocate only the minors the driver's feature flags ask for;
	 * the legacy/primary minor always exists. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (drm_ht_create(&dev->map_hash, 12))
		goto err_minors;

	ret = drm_ctxbitmap_init(dev);
	if (ret) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto err_ht;
	}

	if (driver->driver_features & DRIVER_GEM) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	return dev;

	/* Unwind in reverse order of construction. */
err_ctxbitmap:
	drm_ctxbitmap_cleanup(dev);
err_ht:
	drm_ht_remove(&dev->map_hash);
err_minors:
	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
	mutex_destroy(&dev->master_mutex);
	kfree(dev);
	return NULL;
}
EXPORT_SYMBOL(drm_dev_alloc);
David Herrmannc22f0ac2013-10-02 11:23:35 +0200638
/*
 * drm_dev_release - kref release callback for a DRM device
 *
 * Runs when the last reference is dropped (via drm_dev_unref()). Frees
 * everything drm_dev_alloc() set up, in reverse order.
 */
static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_destroy(dev);

	drm_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_LEGACY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	mutex_destroy(&dev->master_mutex);
	kfree(dev->unique);
	kfree(dev);
}
David Herrmann099d1c22014-01-29 10:21:36 +0100658
659/**
660 * drm_dev_ref - Take reference of a DRM device
661 * @dev: device to take reference of or NULL
662 *
663 * This increases the ref-count of @dev by one. You *must* already own a
664 * reference when calling this. Use drm_dev_unref() to drop this reference
665 * again.
666 *
667 * This function never fails. However, this function does not provide *any*
668 * guarantee whether the device is alive or running. It only provides a
669 * reference to the object and the memory associated with it.
670 */
void drm_dev_ref(struct drm_device *dev)
{
	/* NULL is tolerated so callers can pass optional devices. */
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);
677
678/**
679 * drm_dev_unref - Drop reference of a DRM device
680 * @dev: device to drop reference of or NULL
681 *
682 * This decreases the ref-count of @dev by one. The device is destroyed if the
683 * ref-count drops to zero.
684 */
void drm_dev_unref(struct drm_device *dev)
{
	/* Last put triggers drm_dev_release(); NULL is a no-op. */
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);
David Herrmann0dc8fe52013-10-02 11:23:37 +0200691
692/**
David Herrmannc22f0ac2013-10-02 11:23:35 +0200693 * drm_dev_register - Register DRM device
694 * @dev: Device to register
Thierry Redingc6a1af8a2014-05-19 13:39:07 +0200695 * @flags: Flags passed to the driver's .load() function
David Herrmannc22f0ac2013-10-02 11:23:35 +0200696 *
697 * Register the DRM device @dev with the system, advertise device to user-space
698 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
699 * previously.
700 *
701 * Never call this twice on any device!
702 *
703 * RETURNS:
704 * 0 on success, negative error code on failure.
705 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

	mutex_lock(&drm_global_mutex);

	/* Publish all minors first; unallocated types register as no-ops. */
	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
	if (ret)
		goto err_minors;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	/* setup grouping for legacy outputs */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
				&dev->primary->mode_group);
		if (ret)
			goto err_unload;
	}

	ret = 0;
	goto out_unlock;

err_unload:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_minors:
	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);
David Herrmannc3a49732013-10-02 11:23:38 +0200753
754/**
755 * drm_dev_unregister - Unregister DRM device
756 * @dev: Device to unregister
757 *
758 * Unregister the DRM device from the system. This does the reverse of
759 * drm_dev_register() but does not deallocate the device. The caller must call
David Herrmann099d1c22014-01-29 10:21:36 +0100760 * drm_dev_unref() to drop their final reference.
David Herrmannc3a49732013-10-02 11:23:38 +0200761 */
762void drm_dev_unregister(struct drm_device *dev)
763{
764 struct drm_map_list *r_list, *list_temp;
765
766 drm_lastclose(dev);
767
768 if (dev->driver->unload)
769 dev->driver->unload(dev);
770
Daniel Vetter4efafeb2013-12-11 11:34:38 +0100771 if (dev->agp)
772 drm_pci_agp_destroy(dev);
David Herrmannc3a49732013-10-02 11:23:38 +0200773
774 drm_vblank_cleanup(dev);
775
776 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
777 drm_rmmap(dev, r_list->map);
778
David Herrmannafcdbc82014-01-29 12:57:05 +0100779 drm_minor_unregister(dev, DRM_MINOR_LEGACY);
780 drm_minor_unregister(dev, DRM_MINOR_RENDER);
781 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
David Herrmannc3a49732013-10-02 11:23:38 +0200782}
783EXPORT_SYMBOL(drm_dev_unregister);
Thierry Redingca8e2ad2014-04-11 15:23:00 +0200784
785/**
786 * drm_dev_set_unique - Set the unique name of a DRM device
787 * @dev: device of which to set the unique name
788 * @fmt: format string for unique name
789 *
790 * Sets the unique name of a DRM device using the specified format string and
791 * a variable list of arguments. Drivers can use this at driver probe time if
792 * the unique name of the devices they drive is static.
793 *
794 * Return: 0 on success or a negative error code on failure.
795 */
796int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
797{
798 va_list ap;
799
800 kfree(dev->unique);
801
802 va_start(ap, fmt);
803 dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
804 va_end(ap);
805
806 return dev->unique ? 0 : -ENOMEM;
807}
808EXPORT_SYMBOL(drm_dev_set_unique);
David Herrmann1b7199f2014-07-23 12:29:56 +0200809
/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once setup, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */
829
830static int drm_stub_open(struct inode *inode, struct file *filp)
831{
832 const struct file_operations *new_fops;
833 struct drm_minor *minor;
834 int err;
835
836 DRM_DEBUG("\n");
837
838 mutex_lock(&drm_global_mutex);
839 minor = drm_minor_acquire(iminor(inode));
840 if (IS_ERR(minor)) {
841 err = PTR_ERR(minor);
842 goto out_unlock;
843 }
844
845 new_fops = fops_get(minor->dev->driver->fops);
846 if (!new_fops) {
847 err = -ENODEV;
848 goto out_release;
849 }
850
851 replace_fops(filp, new_fops);
852 if (filp->f_op->open)
853 err = filp->f_op->open(inode, filp);
854 else
855 err = 0;
856
857out_release:
858 drm_minor_release(minor);
859out_unlock:
860 mutex_unlock(&drm_global_mutex);
861 return err;
862}
863
864static const struct file_operations drm_stub_fops = {
865 .owner = THIS_MODULE,
866 .open = drm_stub_open,
867 .llseek = noop_llseek,
868};
869
870static int __init drm_core_init(void)
871{
872 int ret = -ENOMEM;
873
874 drm_global_init();
875 drm_connector_ida_init();
876 idr_init(&drm_minors_idr);
877
878 if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
879 goto err_p1;
880
881 drm_class = drm_sysfs_create(THIS_MODULE, "drm");
882 if (IS_ERR(drm_class)) {
883 printk(KERN_ERR "DRM: Error creating drm class.\n");
884 ret = PTR_ERR(drm_class);
885 goto err_p2;
886 }
887
888 drm_debugfs_root = debugfs_create_dir("dri", NULL);
889 if (!drm_debugfs_root) {
890 DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
891 ret = -1;
892 goto err_p3;
893 }
894
895 DRM_INFO("Initialized %s %d.%d.%d %s\n",
896 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
897 return 0;
898err_p3:
899 drm_sysfs_destroy();
900err_p2:
901 unregister_chrdev(DRM_MAJOR, "drm");
902
903 idr_destroy(&drm_minors_idr);
904err_p1:
905 return ret;
906}
907
908static void __exit drm_core_exit(void)
909{
910 debugfs_remove(drm_debugfs_root);
911 drm_sysfs_destroy();
912
913 unregister_chrdev(DRM_MAJOR, "drm");
914
915 drm_connector_ida_destroy();
916 idr_destroy(&drm_minors_idr);
917}
918
919module_init(drm_core_init);
920module_exit(drm_core_exit);