/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>

#include <drm/drm_drv.h>
#include <drm/drmP.h>

#include "drm_crtc_internal.h"
#include "drm_legacy.h"
#include "drm_internal.h"

/*
 * drm_debug: Enable debug output.
 * Bitmask of DRM_UT_x. See include/drm/drmP.h for details.
 */
unsigned int drm_debug = 0;
EXPORT_SYMBOL(drm_debug);

MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)");
module_param_named(debug, drm_debug, int, 0600);
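
/*
 * Usage sketch (values are examples only): the bitmask can be given at load
 * time when drm is built as a module, e.g. "modprobe drm debug=0x3f", or
 * changed at runtime through the 0600 parameter exposed above, e.g.
 * "echo 0x04 > /sys/module/drm/parameters/debug" to keep only KMS messages.
 */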

static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete = false;

static struct dentry *drm_debugfs_root;

#define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"

void drm_dev_printk(const struct device *dev, const char *level,
                    unsigned int category, const char *function_name,
                    const char *prefix, const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        if (category != DRM_UT_NONE && !(drm_debug & category))
                return;

        va_start(args, format);
        vaf.fmt = format;
        vaf.va = &args;

        if (dev)
                dev_printk(level, dev, DRM_PRINTK_FMT, function_name, prefix,
                           &vaf);
        else
                printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);

        va_end(args);
}
EXPORT_SYMBOL(drm_dev_printk);

void drm_printk(const char *level, unsigned int category,
                const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        if (category != DRM_UT_NONE && !(drm_debug & category))
                return;

        va_start(args, format);
        vaf.fmt = format;
        vaf.va = &args;

        printk("%s" "[" DRM_NAME ":%ps]%s %pV",
               level, __builtin_return_address(0),
               strcmp(level, KERN_ERR) == 0 ? " *ERROR*" : "", &vaf);

        va_end(args);
}
EXPORT_SYMBOL(drm_printk);

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means DRM minors have the same lifetime as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
                                             unsigned int type)
{
        switch (type) {
        case DRM_MINOR_PRIMARY:
                return &dev->primary;
        case DRM_MINOR_RENDER:
                return &dev->render;
        case DRM_MINOR_CONTROL:
                return &dev->control;
        default:
                return NULL;
        }
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;
        unsigned long flags;
        int r;

        minor = kzalloc(sizeof(*minor), GFP_KERNEL);
        if (!minor)
                return -ENOMEM;

        minor->type = type;
        minor->dev = dev;

        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&drm_minor_lock, flags);
        r = idr_alloc(&drm_minors_idr,
                      NULL,
                      64 * type,
                      64 * (type + 1),
                      GFP_NOWAIT);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
        idr_preload_end();

        if (r < 0)
                goto err_free;

        minor->index = r;

        minor->kdev = drm_sysfs_minor_alloc(minor);
        if (IS_ERR(minor->kdev)) {
                r = PTR_ERR(minor->kdev);
                goto err_index;
        }

        *drm_minor_get_slot(dev, type) = minor;
        return 0;

err_index:
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_remove(&drm_minors_idr, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
err_free:
        kfree(minor);
        return r;
}

static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
        struct drm_minor **slot, *minor;
        unsigned long flags;

        slot = drm_minor_get_slot(dev, type);
        minor = *slot;
        if (!minor)
                return;

        put_device(minor->kdev);

        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_remove(&drm_minors_idr, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        kfree(minor);
        *slot = NULL;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;
        unsigned long flags;
        int ret;

        DRM_DEBUG("\n");

        minor = *drm_minor_get_slot(dev, type);
        if (!minor)
                return 0;

        ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
        if (ret) {
                DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
                goto err_debugfs;
        }

        ret = device_add(minor->kdev);
        if (ret)
                goto err_debugfs;

        /* replace NULL with @minor so lookups will succeed from now on */
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_replace(&drm_minors_idr, minor, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        DRM_DEBUG("new minor registered %d\n", minor->index);
        return 0;

err_debugfs:
        drm_debugfs_cleanup(minor);
        return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;
        unsigned long flags;

        minor = *drm_minor_get_slot(dev, type);
        if (!minor || !device_is_registered(minor->kdev))
                return;

        /* replace @minor with NULL so lookups will fail from now on */
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_replace(&drm_minors_idr, NULL, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        device_del(minor->kdev);
        dev_set_drvdata(minor->kdev, NULL); /* safety belt */
        drm_debugfs_cleanup(minor);
}

/*
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
        struct drm_minor *minor;
        unsigned long flags;

        spin_lock_irqsave(&drm_minor_lock, flags);
        minor = idr_find(&drm_minors_idr, minor_id);
        if (minor)
                drm_dev_get(minor->dev);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        if (!minor) {
                return ERR_PTR(-ENODEV);
        } else if (drm_dev_is_unplugged(minor->dev)) {
                drm_dev_put(minor->dev);
                return ERR_PTR(-ENODEV);
        }

        return minor;
}

void drm_minor_release(struct drm_minor *minor)
{
        drm_dev_put(minor->dev);
}
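
/*
 * Typical acquire/release pairing, sketched for illustration only
 * (drm_stub_open() further down is the in-file user of this interface):
 *
 *        minor = drm_minor_acquire(iminor(inode));
 *        if (IS_ERR(minor))
 *                return PTR_ERR(minor);
 *        ... minor->dev is guaranteed to stay valid here ...
 *        drm_minor_release(minor);
 */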

/**
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by &struct drm_device. This
 * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
 * callbacks implemented by the driver. The driver then needs to initialize all
 * the various subsystems for the drm device like memory management, vblank
 * handling, modesetting support and initial output configuration plus obviously
 * initialize all the corresponding hardware bits. An important part of this is
 * also calling drm_dev_set_unique() to set the userspace-visible unique name of
 * this device instance. Finally when everything is up and running and ready for
 * userspace the device instance can be published using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
 *
 * When cleaning up a device instance everything needs to be done in reverse:
 * First unpublish the device instance with drm_dev_unregister(). Then clean up
 * any other resources allocated at device initialization and drop the driver's
 * reference to &drm_device using drm_dev_put().
 *
 * Note that the lifetime rules for a &drm_device instance still have a lot of
 * historical baggage. Hence use the reference counting provided by
 * drm_dev_get() and drm_dev_put() only carefully.
 *
 * It is recommended that drivers embed &struct drm_device into their own device
 * structure, which is supported through drm_dev_init().
 */
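
/*
 * A minimal sketch of the flow described above; all foo_* identifiers are
 * hypothetical and only illustrate the intended call order:
 *
 *        struct foo_device {
 *                struct drm_device drm;  // first, so kfree(&foo->drm) frees foo
 *                void __iomem *mmio;
 *        };
 *
 *        static int foo_probe(struct platform_device *pdev)
 *        {
 *                struct foo_device *foo;
 *                int ret;
 *
 *                foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *                if (!foo)
 *                        return -ENOMEM;
 *
 *                ret = drm_dev_init(&foo->drm, &foo_drm_driver, &pdev->dev);
 *                if (ret) {
 *                        kfree(foo);
 *                        return ret;
 *                }
 *                platform_set_drvdata(pdev, foo);
 *
 *                // ... map registers, set up modeset/vblank, hardware ...
 *
 *                ret = drm_dev_register(&foo->drm, 0);
 *                if (ret)
 *                        drm_dev_put(&foo->drm);  // drops the initial reference
 *                return ret;
 *        }
 *
 *        static int foo_remove(struct platform_device *pdev)
 *        {
 *                struct foo_device *foo = platform_get_drvdata(pdev);
 *
 *                drm_dev_unregister(&foo->drm);
 *                drm_dev_put(&foo->drm);
 *                return 0;
 *        }
 */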
339
340/**
Thierry Redingc6a1af8a2014-05-19 13:39:07 +0200341 * drm_put_dev - Unregister and release a DRM device
342 * @dev: DRM device
343 *
344 * Called at module unload time or when a PCI device is unplugged.
345 *
Kristian Høgsberg112b7152009-01-04 16:55:33 -0500346 * Cleans up all DRM device, calling drm_lastclose().
Daniel Vetter6e3f7972015-09-28 21:46:35 +0200347 *
348 * Note: Use of this function is deprecated. It will eventually go away
Aishwarya Pant9a96f552017-09-26 13:58:49 +0530349 * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
Daniel Vetter6e3f7972015-09-28 21:46:35 +0200350 * instead to make sure that the device isn't userspace accessible any more
351 * while teardown is in progress, ensuring that userspace can't access an
352 * inconsistent state.
Kristian Høgsberg112b7152009-01-04 16:55:33 -0500353 */
void drm_put_dev(struct drm_device *dev)
{
        DRM_DEBUG("\n");

        if (!dev) {
                DRM_ERROR("cleanup called no dev\n");
                return;
        }

        drm_dev_unregister(dev);
        drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);

static void drm_device_set_unplugged(struct drm_device *dev)
{
        smp_wmb();
        atomic_set(&dev->unplugged, 1);
}

/**
 * drm_dev_unplug - unplug a DRM device
 * @dev: DRM device
 *
 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
 * userspace operations. Entry-points can use drm_dev_is_unplugged(). This
 * essentially unregisters the device like drm_dev_unregister(), but can be
 * called while there are still open users of @dev.
 */
void drm_dev_unplug(struct drm_device *dev)
{
        drm_dev_unregister(dev);

        mutex_lock(&drm_global_mutex);
        drm_device_set_unplugged(dev);
        if (dev->open_count == 0)
                drm_dev_put(dev);
        mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_dev_unplug);
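
/*
 * Sketch of how an entry point in a hotpluggable driver might check the
 * unplugged state; foo_ioctl() and its wiring are hypothetical:
 *
 *        static int foo_ioctl(struct drm_device *dev, void *data,
 *                             struct drm_file *file_priv)
 *        {
 *                if (drm_dev_is_unplugged(dev))
 *                        return -ENODEV;
 *
 *                // ... touch the hardware only while it is still present ...
 *                return 0;
 *        }
 */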

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
        .d_dname        = simple_dname,
};

static const struct super_operations drm_fs_sops = {
        .statfs         = simple_statfs,
};

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
                                   const char *dev_name, void *data)
{
        return mount_pseudo(fs_type,
                            "drm:",
                            &drm_fs_sops,
                            &drm_fs_dops,
                            0x010203ff);
}

static struct file_system_type drm_fs_type = {
        .name           = "drm",
        .owner          = THIS_MODULE,
        .mount          = drm_fs_mount,
        .kill_sb        = kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
        struct inode *inode;
        int r;

        r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
        if (r < 0) {
                DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
                return ERR_PTR(r);
        }

        inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
        if (IS_ERR(inode))
                simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

        return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
        if (inode) {
                iput(inode);
                simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
        }
}

/**
 * drm_dev_init - Initialise new DRM device
 * @dev: DRM device
 * @driver: DRM driver
 * @parent: Parent device object
 *
 * Initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems. This should be done last in the device
 * initialization sequence to make sure userspace can't access an inconsistent
 * state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_get() and
 * drm_dev_put() to take and drop further ref-counts.
 *
 * Note that for purely virtual devices @parent can be NULL.
 *
 * Drivers that do not want to allocate their own device struct
 * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
 * that do embed &struct drm_device it must be placed first in the overall
 * structure, and the overall structure must be allocated using kmalloc(): The
 * drm core's release function unconditionally calls kfree() on the @dev pointer
 * when the final reference is released. To override this behaviour, and so
 * allow embedding of the drm_device inside the driver's device struct at an
 * arbitrary offset, you must supply a &drm_driver.release callback and control
 * the finalization explicitly.
 *
 * RETURNS:
 * 0 on success, or error code on failure.
 */
int drm_dev_init(struct drm_device *dev,
                 struct drm_driver *driver,
                 struct device *parent)
{
        int ret;

        if (!drm_core_init_complete) {
                DRM_ERROR("DRM core is not initialized\n");
                return -ENODEV;
        }

        kref_init(&dev->ref);
        dev->dev = parent;
        dev->driver = driver;

        INIT_LIST_HEAD(&dev->filelist);
        INIT_LIST_HEAD(&dev->ctxlist);
        INIT_LIST_HEAD(&dev->vmalist);
        INIT_LIST_HEAD(&dev->maplist);
        INIT_LIST_HEAD(&dev->vblank_event_list);

        spin_lock_init(&dev->buf_lock);
        spin_lock_init(&dev->event_lock);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->filelist_mutex);
        mutex_init(&dev->ctxlist_mutex);
        mutex_init(&dev->master_mutex);

        dev->anon_inode = drm_fs_inode_new();
        if (IS_ERR(dev->anon_inode)) {
                ret = PTR_ERR(dev->anon_inode);
                DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
                goto err_free;
        }

        if (drm_core_check_feature(dev, DRIVER_RENDER)) {
                ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
                if (ret)
                        goto err_minors;
        }

        ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
        if (ret)
                goto err_minors;

        ret = drm_ht_create(&dev->map_hash, 12);
        if (ret)
                goto err_minors;

        drm_legacy_ctxbitmap_init(dev);

        if (drm_core_check_feature(dev, DRIVER_GEM)) {
                ret = drm_gem_init(dev);
                if (ret) {
                        DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
                        goto err_ctxbitmap;
                }
        }

        /* Use the parent device name as DRM device unique identifier, but fall
         * back to the driver name for virtual devices like vgem. */
        ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
        if (ret)
                goto err_setunique;

        return 0;

err_setunique:
        if (drm_core_check_feature(dev, DRIVER_GEM))
                drm_gem_destroy(dev);
err_ctxbitmap:
        drm_legacy_ctxbitmap_cleanup(dev);
        drm_ht_remove(&dev->map_hash);
err_minors:
        drm_minor_free(dev, DRM_MINOR_PRIMARY);
        drm_minor_free(dev, DRM_MINOR_RENDER);
        drm_minor_free(dev, DRM_MINOR_CONTROL);
        drm_fs_inode_free(dev->anon_inode);
err_free:
        mutex_destroy(&dev->master_mutex);
        mutex_destroy(&dev->ctxlist_mutex);
        mutex_destroy(&dev->filelist_mutex);
        mutex_destroy(&dev->struct_mutex);
        return ret;
}
EXPORT_SYMBOL(drm_dev_init);

/**
 * drm_dev_fini - Finalize a dead DRM device
 * @dev: DRM device
 *
 * Finalize a dead DRM device. This is the converse to drm_dev_init() and
 * frees up all data allocated by it. All driver private data should be
 * finalized first. Note that this function does not free the @dev, that is
 * left to the caller.
 *
 * The ref-count of @dev must be zero, and drm_dev_fini() should only be called
 * from a &drm_driver.release callback.
 */
void drm_dev_fini(struct drm_device *dev)
{
        drm_vblank_cleanup(dev);

        if (drm_core_check_feature(dev, DRIVER_GEM))
                drm_gem_destroy(dev);

        drm_legacy_ctxbitmap_cleanup(dev);
        drm_ht_remove(&dev->map_hash);
        drm_fs_inode_free(dev->anon_inode);

        drm_minor_free(dev, DRM_MINOR_PRIMARY);
        drm_minor_free(dev, DRM_MINOR_RENDER);
        drm_minor_free(dev, DRM_MINOR_CONTROL);

        mutex_destroy(&dev->master_mutex);
        mutex_destroy(&dev->ctxlist_mutex);
        mutex_destroy(&dev->filelist_mutex);
        mutex_destroy(&dev->struct_mutex);
        kfree(dev->unique);
}
EXPORT_SYMBOL(drm_dev_fini);

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems. This should be done last in the device
 * initialization sequence to make sure userspace can't access an inconsistent
 * state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_get() and
 * drm_dev_put() to take and drop further ref-counts.
 *
 * Note that for purely virtual devices @parent can be NULL.
 *
 * Drivers that wish to subclass or embed &struct drm_device into their
 * own struct should look at using drm_dev_init() instead.
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                                 struct device *parent)
{
        struct drm_device *dev;
        int ret;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        ret = drm_dev_init(dev, driver, parent);
        if (ret) {
                kfree(dev);
                return ERR_PTR(ret);
        }

        return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
        struct drm_device *dev = container_of(ref, struct drm_device, ref);

        if (dev->driver->release) {
                dev->driver->release(dev);
        } else {
                drm_dev_fini(dev);
                kfree(dev);
        }
}

/**
 * drm_dev_get - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_put() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_get(struct drm_device *dev)
{
        if (dev)
                kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_get);

/**
 * drm_dev_put - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_put(struct drm_device *dev)
{
        if (dev)
                kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_put);

/**
 * drm_dev_unref - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This is a compatibility alias for drm_dev_put() and should not be used by new
 * code.
 */
void drm_dev_unref(struct drm_device *dev)
{
        drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_dev_unref);

static int create_compat_control_link(struct drm_device *dev)
{
        struct drm_minor *minor;
        char *name;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;

        minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
        if (!minor)
                return 0;

        /*
         * Some existing userspace out there uses the existence of the controlD*
         * sysfs files to figure out whether it's a modeset driver. It only does
         * readdir, hence a symlink is sufficient (and the least confusing
         * option). Otherwise controlD* is entirely unused.
         *
         * Old controlD chardevs have been allocated in the range
         * 64-127.
         */
        name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
        if (!name)
                return -ENOMEM;

        ret = sysfs_create_link(minor->kdev->kobj.parent,
                                &minor->kdev->kobj,
                                name);

        kfree(name);

        return ret;
}

static void remove_compat_control_link(struct drm_device *dev)
{
        struct drm_minor *minor;
        char *name;

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
        if (!minor)
                return;

        /* the name must match what create_compat_control_link() created */
        name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
        if (!name)
                return;

        sysfs_remove_link(minor->kdev->kobj.parent, name);

        kfree(name);
}

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
 * previously.
 *
 * Never call this twice on any device!
 *
 * NOTE: To ensure backward compatibility with existing drivers, this function
 * calls the &drm_driver.load method after registering the device nodes,
 * creating race conditions. Usage of the &drm_driver.load method is therefore
 * deprecated; drivers must perform all initialization before calling
 * drm_dev_register().
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
        struct drm_driver *driver = dev->driver;
        int ret;

        mutex_lock(&drm_global_mutex);

        ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
        if (ret)
                goto err_minors;

        ret = drm_minor_register(dev, DRM_MINOR_RENDER);
        if (ret)
                goto err_minors;

        ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
        if (ret)
                goto err_minors;

        ret = create_compat_control_link(dev);
        if (ret)
                goto err_minors;

        dev->registered = true;

        if (dev->driver->load) {
                ret = dev->driver->load(dev, flags);
                if (ret)
                        goto err_minors;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_modeset_register_all(dev);

        ret = 0;

        DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
                 driver->name, driver->major, driver->minor,
                 driver->patchlevel, driver->date,
                 dev->dev ? dev_name(dev->dev) : "virtual device",
                 dev->primary->index);

        goto out_unlock;

err_minors:
        remove_compat_control_link(dev);
        drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
        drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
        mutex_unlock(&drm_global_mutex);
        return ret;
}
EXPORT_SYMBOL(drm_dev_register);

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_put() to drop their final reference.
 *
 * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
 * which can be called while there are still open users of @dev.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
 */
void drm_dev_unregister(struct drm_device *dev)
{
        struct drm_map_list *r_list, *list_temp;

        if (drm_core_check_feature(dev, DRIVER_LEGACY))
                drm_lastclose(dev);

        dev->registered = false;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_modeset_unregister_all(dev);

        if (dev->driver->unload)
                dev->driver->unload(dev);

        if (dev->agp)
                drm_pci_agp_destroy(dev);

        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
                drm_legacy_rmmap(dev, r_list->map);

        remove_compat_control_link(dev);
        drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
        drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);

/**
 * drm_dev_set_unique - Set the unique name of a DRM device
 * @dev: device of which to set the unique name
 * @name: unique name
 *
 * Sets the unique name of a DRM device using the specified string. Drivers
 * can use this at driver probe time if the unique name of the devices they
 * drive is static.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
        kfree(dev->unique);
        dev->unique = kstrdup(name, GFP_KERNEL);

        return dev->unique ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(drm_dev_set_unique);
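
/*
 * Illustrative only: a driver whose device name is static could override the
 * default unique name (picked from the parent device by drm_dev_init()) right
 * after initialization, e.g. with a hypothetical "foo" device:
 *
 *        ret = drm_dev_set_unique(&foo->drm, "foo");
 */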

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once setup, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
        const struct file_operations *new_fops;
        struct drm_minor *minor;
        int err;

        DRM_DEBUG("\n");

        mutex_lock(&drm_global_mutex);
        minor = drm_minor_acquire(iminor(inode));
        if (IS_ERR(minor)) {
                err = PTR_ERR(minor);
                goto out_unlock;
        }

        new_fops = fops_get(minor->dev->driver->fops);
        if (!new_fops) {
                err = -ENODEV;
                goto out_release;
        }

        replace_fops(filp, new_fops);
        if (filp->f_op->open)
                err = filp->f_op->open(inode, filp);
        else
                err = 0;

out_release:
        drm_minor_release(minor);
out_unlock:
        mutex_unlock(&drm_global_mutex);
        return err;
}

static const struct file_operations drm_stub_fops = {
        .owner = THIS_MODULE,
        .open = drm_stub_open,
        .llseek = noop_llseek,
};

static void drm_core_exit(void)
{
        unregister_chrdev(DRM_MAJOR, "drm");
        debugfs_remove(drm_debugfs_root);
        drm_sysfs_destroy();
        idr_destroy(&drm_minors_idr);
        drm_connector_ida_destroy();
        drm_global_release();
}

static int __init drm_core_init(void)
{
        int ret;

        drm_global_init();
        drm_connector_ida_init();
        idr_init(&drm_minors_idr);

        ret = drm_sysfs_init();
        if (ret < 0) {
                DRM_ERROR("Cannot create DRM class: %d\n", ret);
                goto error;
        }

        drm_debugfs_root = debugfs_create_dir("dri", NULL);
        if (!drm_debugfs_root) {
                ret = -ENOMEM;
                DRM_ERROR("Cannot create debugfs-root: %d\n", ret);
                goto error;
        }

        ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
        if (ret < 0)
                goto error;

        drm_core_init_complete = true;

        DRM_DEBUG("Initialized\n");
        return 0;

error:
        drm_core_exit();
        return ret;
}

module_init(drm_core_init);
module_exit(drm_core_exit);