blob: 8ad9fe68335f5d3c6e16eb9e99e77907edca0e69 [file] [log] [blame]
Benjamin Gaignard9bbf86f2014-07-31 09:39:11 +02001/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#include <drm/drmP.h>
8
9#include <linux/component.h>
10#include <linux/debugfs.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/of_platform.h>
14
Benjamin Gaignardde4b00b2015-03-19 13:35:16 +010015#include <drm/drm_atomic.h>
16#include <drm/drm_atomic_helper.h>
Benjamin Gaignard9bbf86f2014-07-31 09:39:11 +020017#include <drm/drm_crtc_helper.h>
18#include <drm/drm_gem_cma_helper.h>
19#include <drm/drm_fb_cma_helper.h>
20
21#include "sti_drm_drv.h"
22#include "sti_drm_crtc.h"
23
24#define DRIVER_NAME "sti"
25#define DRIVER_DESC "STMicroelectronics SoC DRM"
26#define DRIVER_DATE "20140601"
27#define DRIVER_MAJOR 1
28#define DRIVER_MINOR 0
29
30#define STI_MAX_FB_HEIGHT 4096
31#define STI_MAX_FB_WIDTH 4096
32
/*
 * Record @state as the pending commit and kick the commit worker;
 * the hardware programming then happens asynchronously in
 * sti_drm_atomic_work().
 *
 * NOTE(review): commit.state is overwritten unconditionally here.  This
 * is only safe because sti_drm_atomic_commit() holds commit.lock and
 * flush_work()s the worker first, so no commit can still be pending.
 */
static void sti_drm_atomic_schedule(struct sti_drm_private *private,
				    struct drm_atomic_state *state)
{
	private->commit.state = state;
	schedule_work(&private->commit.work);
}
39
/*
 * Apply @state to the hardware and free it.
 *
 * Called either synchronously from sti_drm_atomic_commit() or from the
 * commit worker for asynchronous commits; by the time this runs, @state
 * has already been swapped in as the current software state.
 */
static void sti_drm_atomic_complete(struct sti_drm_private *private,
				    struct drm_atomic_state *state)
{
	struct drm_device *drm = private->drm_dev;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	/* Standard atomic-helper ordering: disable outgoing outputs,
	 * flush plane updates, then enable the new outputs. */
	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_planes(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);

	/* Wait for the update to land before releasing old framebuffers. */
	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_free(state);
}
70
71static void sti_drm_atomic_work(struct work_struct *work)
72{
73 struct sti_drm_private *private = container_of(work,
74 struct sti_drm_private, commit.work);
75
76 sti_drm_atomic_complete(private, private->commit.state);
77}
78
/*
 * .atomic_commit implementation: prepare the planes, swap the new state
 * in as the current software state, then complete it either inline or
 * (when @async) via the commit worker.
 *
 * Returns 0 on success, or the error from plane preparation.
 */
static int sti_drm_atomic_commit(struct drm_device *drm,
				 struct drm_atomic_state *state, bool async)
{
	struct sti_drm_private *private = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding asynchronous commits */
	mutex_lock(&private->commit.lock);
	flush_work(&private->commit.work);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(drm, state);

	if (async)
		sti_drm_atomic_schedule(private, state);
	else
		sti_drm_atomic_complete(private, state);

	mutex_unlock(&private->commit.lock);
	return 0;
}
109
Benjamin Gaignard9bbf86f2014-07-31 09:39:11 +0200110static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
111 .fb_create = drm_fb_cma_create,
Benjamin Gaignardde4b00b2015-03-19 13:35:16 +0100112 .atomic_check = drm_atomic_helper_check,
113 .atomic_commit = sti_drm_atomic_commit,
Benjamin Gaignard9bbf86f2014-07-31 09:39:11 +0200114};
115
116static void sti_drm_mode_config_init(struct drm_device *dev)
117{
118 dev->mode_config.min_width = 0;
119 dev->mode_config.min_height = 0;
120
121 /*
122 * set max width and height as default value.
123 * this value would be used to check framebuffer size limitation
124 * at drm_mode_addfb().
125 */
126 dev->mode_config.max_width = STI_MAX_FB_HEIGHT;
127 dev->mode_config.max_height = STI_MAX_FB_WIDTH;
128
129 dev->mode_config.funcs = &sti_drm_mode_config_funcs;
130}
131
132static int sti_drm_load(struct drm_device *dev, unsigned long flags)
133{
134 struct sti_drm_private *private;
135 int ret;
136
137 private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL);
138 if (!private) {
139 DRM_ERROR("Failed to allocate private\n");
140 return -ENOMEM;
141 }
142 dev->dev_private = (void *)private;
143 private->drm_dev = dev;
144
Benjamin Gaignardde4b00b2015-03-19 13:35:16 +0100145 mutex_init(&private->commit.lock);
146 INIT_WORK(&private->commit.work, sti_drm_atomic_work);
147
Benjamin Gaignard9bbf86f2014-07-31 09:39:11 +0200148 drm_mode_config_init(dev);
149 drm_kms_helper_poll_init(dev);
150
151 sti_drm_mode_config_init(dev);
152
153 ret = component_bind_all(dev->dev, dev);
Benjamin Gaignardf78e7722014-12-11 13:35:29 +0100154 if (ret) {
155 drm_kms_helper_poll_fini(dev);
156 drm_mode_config_cleanup(dev);
157 kfree(private);
Benjamin Gaignard9bbf86f2014-07-31 09:39:11 +0200158 return ret;
Benjamin Gaignardf78e7722014-12-11 13:35:29 +0100159 }
Benjamin Gaignard9bbf86f2014-07-31 09:39:11 +0200160
Benjamin Gaignardde4b00b2015-03-19 13:35:16 +0100161 drm_mode_config_reset(dev);
Benjamin Gaignard9bbf86f2014-07-31 09:39:11 +0200162
163#ifdef CONFIG_DRM_STI_FBDEV
164 drm_fbdev_cma_init(dev, 32,
165 dev->mode_config.num_crtc,
166 dev->mode_config.num_connector);
167#endif
168 return 0;
169}
170
171static const struct file_operations sti_drm_driver_fops = {
172 .owner = THIS_MODULE,
173 .open = drm_open,
174 .mmap = drm_gem_cma_mmap,
175 .poll = drm_poll,
176 .read = drm_read,
177 .unlocked_ioctl = drm_ioctl,
178#ifdef CONFIG_COMPAT
179 .compat_ioctl = drm_compat_ioctl,
180#endif
181 .release = drm_release,
182};
183
184static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
185 struct drm_gem_object *obj,
186 int flags)
187{
188 /* we want to be able to write in mmapped buffer */
189 flags |= O_RDWR;
190 return drm_gem_prime_export(dev, obj, flags);
191}
192
/*
 * Main DRM driver description.  GEM objects are CMA-backed; buffer
 * sharing goes through the generic PRIME helpers (with a driver export
 * wrapper that forces O_RDWR).
 */
static struct drm_driver sti_drm_driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
	    DRIVER_GEM | DRIVER_PRIME,
	.load = sti_drm_load,
	.gem_free_object = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	.fops = &sti_drm_driver_fops,

	/* vblank handling is delegated to the CRTC code */
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank = sti_drm_crtc_enable_vblank,
	.disable_vblank = sti_drm_crtc_disable_vblank,

	/* PRIME buffer sharing via the GEM CMA helpers */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = sti_drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap = drm_gem_cma_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};
224
225static int compare_of(struct device *dev, void *data)
226{
227 return dev->of_node == data;
228}
229
/* Component-master bind: all sub-devices are ready, register the DRM device. */
static int sti_drm_bind(struct device *dev)
{
	return drm_platform_init(&sti_drm_driver, to_platform_device(dev));
}
234
/* Component-master unbind: unregister and release the DRM device. */
static void sti_drm_unbind(struct device *dev)
{
	drm_put_dev(dev_get_drvdata(dev));
}
239
/* Component-master callbacks used by the probe/remove path. */
static const struct component_master_ops sti_drm_ops = {
	.bind = sti_drm_bind,
	.unbind = sti_drm_unbind,
};
244
Benjamin Gaignard53bdcf52015-07-17 12:06:11 +0200245static int sti_drm_platform_probe(struct platform_device *pdev)
Benjamin Gaignard9bbf86f2014-07-31 09:39:11 +0200246{
247 struct device *dev = &pdev->dev;
Benjamin Gaignard53bdcf52015-07-17 12:06:11 +0200248 struct device_node *node = dev->of_node;
Benjamin Gaignard9bbf86f2014-07-31 09:39:11 +0200249 struct device_node *child_np;
250 struct component_match *match = NULL;
251
252 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
253
Benjamin Gaignard53bdcf52015-07-17 12:06:11 +0200254 of_platform_populate(node, NULL, NULL, dev);
255
Benjamin Gaignard9bbf86f2014-07-31 09:39:11 +0200256 child_np = of_get_next_available_child(node, NULL);
257
258 while (child_np) {
259 component_match_add(dev, &match, compare_of, child_np);
260 of_node_put(child_np);
261 child_np = of_get_next_available_child(node, child_np);
262 }
263
264 return component_master_add_with_match(dev, &sti_drm_ops, match);
265}
266
/*
 * Platform remove: tear down the component master, then remove the
 * child platform devices created by of_platform_populate() at probe.
 */
static int sti_drm_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &sti_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}
274
/* Match the DT top-level node of the STi display subsystem. */
static const struct of_device_id sti_drm_dt_ids[] = {
	{ .compatible = "st,sti-display-subsystem", },
	{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, sti_drm_dt_ids);

static struct platform_driver sti_drm_platform_driver = {
	.probe = sti_drm_platform_probe,
	.remove = sti_drm_platform_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = sti_drm_dt_ids,
	},
};

module_platform_driver(sti_drm_platform_driver);

MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");