/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "drm_crtc_internal.h"
#include "drm_legacy.h"
#include "drm_internal.h"

/*
 * drm_debug: Enable debug output.
 * Bitmask of DRM_UT_x. See include/drm/drmP.h for details.
 */
unsigned int drm_debug = 0;
EXPORT_SYMBOL(drm_debug);

MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
"\t\tBit 5 (0x20) will enable VBL messages (vblank code)");
module_param_named(debug, drm_debug, int, 0600);
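
/*
 * The categories above combine as a bitmask: for example, booting with
 * "drm.debug=0x06" on the kernel command line should enable DRIVER and KMS
 * messages. Because the parameter is registered with mode 0600, the mask can
 * typically also be changed at runtime via
 * /sys/module/drm/parameters/debug.
 */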

static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

static struct dentry *drm_debugfs_root;

#define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"

void drm_dev_printk(const struct device *dev, const char *level,
		    unsigned int category, const char *function_name,
		    const char *prefix, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (category != DRM_UT_NONE && !(drm_debug & category))
		return;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (dev)
		dev_printk(level, dev, DRM_PRINTK_FMT, function_name, prefix,
			   &vaf);
	else
		printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_dev_printk);

void drm_printk(const char *level, unsigned int category,
		const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (category != DRM_UT_NONE && !(drm_debug & category))
		return;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk("%s" "[" DRM_NAME ":%ps]%s %pV",
	       level, __builtin_return_address(0),
	       strcmp(level, KERN_ERR) == 0 ? " *ERROR*" : "", &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_printk);
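
/*
 * Drivers normally don't call drm_printk()/drm_dev_printk() directly but go
 * through the logging macros from drmP.h, which expand to these helpers. A
 * rough sketch (crtc_id and ret are illustrative driver-local variables):
 *
 *	DRM_DEBUG_KMS("enabling CRTC %d\n", crtc_id);
 *	DRM_DEV_ERROR(dev->dev, "firmware init failed: %d\n", ret);
 *
 * The category argument (DRM_UT_CORE, DRM_UT_KMS, ...) is what gets checked
 * against the drm_debug bitmask above.
 */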

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means DRM minors have the same lifetime as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */
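
/*
 * For example, a driver that sets DRIVER_RENDER gets a render node
 * allocated, so after initialization the slots can be inspected roughly
 * like this:
 *
 *	if (dev->render)
 *		DRM_DEBUG("render node is minor %d\n", dev->render->index);
 *
 * dev->primary, dev->render and dev->control are the possible slots.
 */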

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_PRIMARY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	case DRM_MINOR_CONTROL:
		return &dev->control;
	default:
		return NULL;
	}
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		goto err_free;

	minor->index = r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev)) {
		r = PTR_ERR(minor->kdev);
		goto err_index;
	}

	*drm_minor_get_slot(dev, type) = minor;
	return 0;

err_index:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
err_free:
	kfree(minor);
	return r;
}

static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
	struct drm_minor **slot, *minor;
	unsigned long flags;

	slot = drm_minor_get_slot(dev, type);
	minor = *slot;
	if (!minor)
		return;

	put_device(minor->kdev);

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	kfree(minor);
	*slot = NULL;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		return ret;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}

/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
 * failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_ref(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_device_is_unplugged(minor->dev)) {
		drm_dev_unref(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

/**
 * drm_minor_release - Release DRM minor
 * @minor: Pointer to DRM minor object
 *
 * Release a minor that was previously acquired via drm_minor_acquire().
 */
void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_unref(minor->dev);
}
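
/*
 * The two calls above pair up as in drm_stub_open() further below, roughly:
 *
 *	minor = drm_minor_acquire(iminor(inode));
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	... use minor->dev ...
 *	drm_minor_release(minor);
 */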

/**
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by struct &drm_device. This
 * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
 * callbacks implemented by the driver. The driver then needs to initialize all
 * the various subsystems for the drm device like memory management, vblank
 * handling, modesetting support and initial output configuration plus obviously
 * initialize all the corresponding hardware bits. Finally when everything is up
 * and running and ready for userspace the device instance can be published
 * using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the ->load() callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
 *
 * When cleaning up a device instance everything needs to be done in reverse:
 * First unpublish the device instance with drm_dev_unregister(). Then clean up
 * any other resources allocated at device initialization and drop the driver's
 * reference to &drm_device using drm_dev_unref().
 *
 * Note that the lifetime rules for a &drm_device instance still carry a lot of
 * historical baggage. Hence use the reference counting provided by
 * drm_dev_ref() and drm_dev_unref() only carefully.
 *
 * Also note that embedding of &drm_device is currently not (yet) supported (but
 * it would be easy to add). Drivers can store driver-private data in the
 * dev_priv field of &drm_device.
 */
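
/*
 * A rough sketch of the flow described above for a hypothetical driver (the
 * foo_* names are placeholders, not real symbols):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct drm_device *ddev;
 *		int ret;
 *
 *		ddev = drm_dev_alloc(&foo_driver, &pdev->dev);
 *		if (IS_ERR(ddev))
 *			return PTR_ERR(ddev);
 *
 *		ret = foo_hw_init(ddev);	// modeset, vblank, hw setup
 *		if (ret)
 *			goto err_unref;
 *
 *		ret = drm_dev_register(ddev, 0);
 *		if (ret)
 *			goto err_cleanup;
 *
 *		return 0;
 *
 *	err_cleanup:
 *		foo_hw_fini(ddev);
 *	err_unref:
 *		drm_dev_unref(ddev);
 *		return ret;
 *	}
 *
 * Teardown runs in reverse: drm_dev_unregister(), then the driver's own
 * cleanup, then drm_dev_unref().
 */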

static int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
	if (!name)
		return -EINVAL;

	kfree(dev->unique);
	dev->unique = kstrdup(name, GFP_KERNEL);

	return dev->unique ? 0 : -ENOMEM;
}

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 *
 * Note: Use of this function is deprecated. It will eventually go away
 * completely. Please use drm_dev_unregister() and drm_dev_unref() explicitly
 * instead to make sure that the device isn't userspace accessible any more
 * while teardown is in progress, ensuring that userspace can't access an
 * inconsistent state.
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);

void drm_unplug_dev(struct drm_device *dev)
{
	/* for a USB device */
	drm_dev_unregister(dev);

	mutex_lock(&drm_global_mutex);

	drm_device_set_unplugged(dev);

	if (dev->open_count == 0) {
		drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);
David Herrmann1bb72532013-10-02 11:23:34 +0200394
David Herrmann31bbe162014-01-03 14:09:47 +0100395/*
396 * DRM internal mount
397 * We want to be able to allocate our own "struct address_space" to control
398 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
399 * stand-alone address_space objects, so we need an underlying inode. As there
400 * is no way to allocate an independent inode easily, we need a fake internal
401 * VFS mount-point.
402 *
403 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
404 * frees it again. You are allowed to use iget() and iput() to get references to
405 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
406 * drm_fs_inode_free() call (which does not have to be the last iput()).
407 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
408 * between multiple inode-users. You could, technically, call
409 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
410 * iput(), but this way you'd end up with a new vfsmount for each inode.
411 */
412
413static int drm_fs_cnt;
414static struct vfsmount *drm_fs_mnt;
415
416static const struct dentry_operations drm_fs_dops = {
417 .d_dname = simple_dname,
418};
419
420static const struct super_operations drm_fs_sops = {
421 .statfs = simple_statfs,
422};
423
424static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
425 const char *dev_name, void *data)
426{
427 return mount_pseudo(fs_type,
428 "drm:",
429 &drm_fs_sops,
430 &drm_fs_dops,
431 0x010203ff);
432}
433
434static struct file_system_type drm_fs_type = {
435 .name = "drm",
436 .owner = THIS_MODULE,
437 .mount = drm_fs_mount,
438 .kill_sb = kill_anon_super,
439};
440
441static struct inode *drm_fs_inode_new(void)
442{
443 struct inode *inode;
444 int r;
445
446 r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
447 if (r < 0) {
448 DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
449 return ERR_PTR(r);
450 }
451
452 inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
453 if (IS_ERR(inode))
454 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
455
456 return inode;
457}
458
459static void drm_fs_inode_free(struct inode *inode)
460{
461 if (inode) {
462 iput(inode);
463 simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
464 }
465}

/**
 * drm_dev_init - Initialise new DRM device
 * @dev: DRM device
 * @driver: DRM driver
 * @parent: Parent device object
 *
 * Initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems. This should be done last in the device
 * initialization sequence to make sure userspace can't access an inconsistent
 * state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * Note that for purely virtual devices @parent can be NULL.
 *
 * Drivers that do not want to allocate their own device struct
 * embedding struct &drm_device can call drm_dev_alloc() instead.
 *
 * RETURNS:
 * 0 on success, or error code on failure.
 */
int drm_dev_init(struct drm_device *dev,
		 struct drm_driver *driver,
		 struct device *parent)
{
	int ret;

	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->buf_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = drm_ht_create(&dev->map_hash, 12);
	if (ret)
		goto err_minors;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	/* Use the parent device name as DRM device unique identifier, but fall
	 * back to the driver name for virtual devices like vgem. */
	ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
	if (ret)
		goto err_setunique;

	return 0;

err_setunique:
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);
err_ctxbitmap:
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
err_minors:
	drm_minor_free(dev, DRM_MINOR_PRIMARY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
	mutex_destroy(&dev->master_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_init);

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems. This should be done last in the device
 * initialization sequence to make sure userspace can't access an inconsistent
 * state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * Note that for purely virtual devices @parent can be NULL.
 *
 * Drivers that wish to subclass or embed struct &drm_device into their
 * own struct should look at using drm_dev_init() instead.
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	ret = drm_dev_init(dev, driver, parent);
	if (ret) {
		kfree(dev);
		return ERR_PTR(ret);
	}

	return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_PRIMARY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	mutex_destroy(&dev->master_mutex);
	kfree(dev->unique);
	kfree(dev);
}

/**
 * drm_dev_ref - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_unref() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_ref(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);

/**
 * drm_dev_unref - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_unref(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
 * previously.
 *
 * Never call this twice on any device!
 *
 * NOTE: To ensure backward compatibility with existing drivers, this function
 * calls the ->load() method after registering the device nodes, creating race
 * conditions. Usage of the ->load() methods is therefore deprecated; drivers
 * must perform all initialization before calling drm_dev_register().
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

	mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	ret = 0;
	goto out_unlock;

err_minors:
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_unref() to drop their final reference.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	drm_lastclose(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_vblank_cleanup(dev);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);

	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once setup, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	mutex_lock(&drm_global_mutex);
	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor)) {
		err = PTR_ERR(minor);
		goto out_unlock;
	}

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out_release;
	}

	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out_release:
	drm_minor_release(minor);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return err;
}

static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};

static void drm_core_exit(void)
{
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
	drm_global_release();
}

static int __init drm_core_init(void)
{
	int ret;

	drm_global_init();
	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);
	if (!drm_debugfs_root) {
		ret = -ENOMEM;
		DRM_ERROR("Cannot create debugfs-root: %d\n", ret);
		goto error;
	}

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;

	DRM_INFO("Initialized\n");
	return 0;

error:
	drm_core_exit();
	return ret;
}

module_init(drm_core_init);
module_exit(drm_core_exit);