/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */
#include <drm/drmP.h>
#include "udl_drv.h"

/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512

#define NR_USB_REQUEST_CHANNEL 0x12

#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256

#define GET_URB_TIMEOUT HZ
#define FREE_URB_TIMEOUT (HZ*2)

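/*
 * Scan the device's vendor-specific descriptor (type 0x5f) for
 * key/length/value records; the only key consumed so far is 0x0200,
 * the maximum pixel area this DisplayLink SKU can drive.  Returns
 * true even when the descriptor is unrecognized, so the driver can
 * still load; only allocation failure returns false.
 */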
static bool udl_parse_vendor_descriptor(struct drm_device *dev,
					struct usb_device *usbdev)
{
	struct udl_device *udl = dev->dev_private;
	char *desc;
	char *buf;
	char *desc_end;

	/* int, not u8: usb_get_descriptor() can return a negative errno */
	int total_len = 0;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
				       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
	if (total_len > 5) {
		DRM_INFO("vendor descriptor length:%x data:%11ph\n",
			 total_len, desc);

		if ((desc[0] != total_len) || /* descriptor length */
		    (desc[1] != 0x5f) ||      /* vendor descriptor type */
		    (desc[2] != 0x01) ||      /* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

		while (desc < desc_end) {
			u8 length;
			u16 key;

			key = le16_to_cpu(*((u16 *) desc));
			desc += sizeof(u16);
			length = *desc;
			desc++;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area;
				max_area = le32_to_cpu(*((u32 *)desc));
				DRM_DEBUG("DL chip limited to %d pixel modes\n",
					  max_area);
				udl->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}

/*
 * Need to ensure a channel is selected before submitting URBs
 */
static int udl_select_std_channel(struct udl_device *udl)
{
	int ret;
	static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
					 0x1C, 0x88, 0x5E, 0x15,
					 0x60, 0xFE, 0xC6, 0x97,
					 0x16, 0x3D, 0x47, 0xF2};
	void *sendbuf;

	sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
	if (!sendbuf)
		return -ENOMEM;

	ret = usb_control_msg(udl->udev,
			      usb_sndctrlpipe(udl->udev, 0),
			      NR_USB_REQUEST_CHANNEL,
			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
			      sendbuf, sizeof(set_def_chn),
			      USB_CTRL_SET_TIMEOUT);
	kfree(sendbuf);
	return ret < 0 ? ret : 0;
}

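/*
 * Release one in-flight slot from workqueue context; used instead of a
 * direct up() where that could deadlock (see the fb_defio note in
 * udl_urb_completion() below).
 */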
static void udl_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}

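/*
 * Completion handler for the bulk render URBs: anything other than an
 * unlink fault is logged and flagged as lost pixels, then the URB is
 * put back on the free list and an in-flight slot is released.
 */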
void udl_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct udl_device *udl = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN)) {
			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
				  __func__, urb->status);
			atomic_set(&udl->lost_pixels, 1);
		}
	}

	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

	spin_lock_irqsave(&udl->urbs.lock, flags);
	list_add_tail(&unode->entry, &udl->urbs.list);
	udl->urbs.available++;
	spin_unlock_irqrestore(&udl->urbs.lock, flags);

#if 0
	/*
	 * When using fb_defio, we deadlock if up() is called
	 * while another is waiting. So queue to another process.
	 */
	if (fb_defio)
		schedule_delayed_work(&unode->release_urb_work, 0);
	else
#endif
		up(&udl->urbs.limit_sem);
}

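/* Wait for each in-flight URB to complete, then free it and its buffer. */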
static void udl_free_urb_list(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	int count = udl->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;
	int ret;
	unsigned long flags;

	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {

		/* Getting interrupted means a leak, but ok at shutdown */
		ret = down_interruptible(&udl->urbs.limit_sem);
		if (ret)
			break;

		spin_lock_irqsave(&udl->urbs.lock, flags);

		node = udl->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);

		spin_unlock_irqrestore(&udl->urbs.lock, flags);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, udl->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}
	udl->urbs.count = 0;
}

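/*
 * Preallocate a pool of 'count' bulk URBs, each with a DMA-coherent
 * transfer buffer of 'size' bytes.  Returns the number actually
 * allocated; limit_sem starts at that count, one slot per URB.
 */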
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
	struct udl_device *udl = dev->dev_private;
	int i = 0;
	struct urb *urb;
	struct urb_node *unode;
	char *buf;

	spin_lock_init(&udl->urbs.lock);

	udl->urbs.size = size;
	INIT_LIST_HEAD(&udl->urbs.list);

	while (i < count) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = udl;

		INIT_DELAYED_WORK(&unode->release_urb_work,
				  udl_release_urb_work);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, udl->udev, usb_sndbulkpipe(udl->udev, 1),
				  buf, size, udl_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &udl->urbs.list);

		i++;
	}

	sema_init(&udl->urbs.limit_sem, i);
	udl->urbs.count = i;
	udl->urbs.available = i;

	DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);

	return i;
}

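/*
 * Take a free URB from the pool, waiting up to GET_URB_TIMEOUT for an
 * in-flight transfer to complete.  Returns NULL (and flags lost_pixels)
 * if the wait times out or is interrupted.
 */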
struct urb *udl_get_urb(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	int ret = 0;
	struct list_head *entry;
	struct urb_node *unode;
	struct urb *urb = NULL;
	unsigned long flags;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		atomic_set(&udl->lost_pixels, 1);
		DRM_INFO("wait for urb interrupted: %x available: %d\n",
			 ret, udl->urbs.available);
		goto error;
	}

	spin_lock_irqsave(&udl->urbs.lock, flags);

	BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
	entry = udl->urbs.list.next;
	list_del_init(entry);
	udl->urbs.available--;

	spin_unlock_irqrestore(&udl->urbs.lock, flags);

	unode = list_entry(entry, struct urb_node, entry);
	urb = unode->urb;

error:
	return urb;
}

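/*
 * Submit a pool URB with 'len' bytes of payload.  On failure the URB
 * is recycled through udl_urb_completion() so the pool never leaks.
 */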
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
	struct udl_device *udl = dev->dev_private;
	int ret;

	BUG_ON(len > udl->urbs.size);

	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		udl_urb_completion(urb); /* because no one else will */
		atomic_set(&udl->lost_pixels, 1);
		DRM_ERROR("usb_submit_urb error %x\n", ret);
	}
	return ret;
}

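/*
 * Driver load: 'flags' carries the struct usb_device pointer handed in
 * by the probe path.  A channel-select failure is logged but treated
 * as non-fatal.
 */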
311int udl_driver_load(struct drm_device *dev, unsigned long flags)
312{
David Herrmannd4f68a72014-08-29 12:12:45 +0200313 struct usb_device *udev = (void*)flags;
Dave Airlie53209182010-12-15 07:14:24 +1000314 struct udl_device *udl;
Oliver Neukum737583f2014-05-19 13:50:22 +0200315 int ret = -ENOMEM;
Dave Airlie53209182010-12-15 07:14:24 +1000316
317 DRM_DEBUG("\n");
318 udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
319 if (!udl)
320 return -ENOMEM;
321
David Herrmannd4f68a72014-08-29 12:12:45 +0200322 udl->udev = udev;
Dave Airlie53209182010-12-15 07:14:24 +1000323 udl->ddev = dev;
324 dev->dev_private = udl;
325
David Herrmannd4f68a72014-08-29 12:12:45 +0200326 if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
Daniel Vettere39a52d2014-04-05 10:25:18 +0200327 ret = -ENODEV;
Dave Airlie53209182010-12-15 07:14:24 +1000328 DRM_ERROR("firmware not recognized. Assume incompatible device\n");
329 goto err;
330 }
331
Jamie Lentind1c151dc2016-08-22 23:17:34 +0100332 if (udl_select_std_channel(udl))
333 DRM_ERROR("Selecting channel failed\n");
334
Dave Airlie53209182010-12-15 07:14:24 +1000335 if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
Dave Airlie53209182010-12-15 07:14:24 +1000336 DRM_ERROR("udl_alloc_urb_list failed\n");
337 goto err;
338 }
339
340 DRM_DEBUG("\n");
341 ret = udl_modeset_init(dev);
Stéphane Marchesin26507b02014-07-02 15:13:42 -0700342 if (ret)
343 goto err;
Dave Airlie53209182010-12-15 07:14:24 +1000344
345 ret = udl_fbdev_init(dev);
Stéphane Marchesin26507b02014-07-02 15:13:42 -0700346 if (ret)
347 goto err;
348
349 ret = drm_vblank_init(dev, 1);
350 if (ret)
351 goto err_fb;
352
Dave Airlie53209182010-12-15 07:14:24 +1000353 return 0;
Stéphane Marchesin26507b02014-07-02 15:13:42 -0700354err_fb:
355 udl_fbdev_cleanup(dev);
Dave Airlie53209182010-12-15 07:14:24 +1000356err:
Stéphane Marchesin26507b02014-07-02 15:13:42 -0700357 if (udl->urbs.count)
358 udl_free_urb_list(dev);
Dave Airlie53209182010-12-15 07:14:24 +1000359 kfree(udl);
360 DRM_ERROR("%d\n", ret);
361 return ret;
362}
363
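/* Drop the URB pool early, e.g. when the USB device goes away. */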
int udl_drop_usb(struct drm_device *dev)
{
	udl_free_urb_list(dev);
	return 0;
}

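/* Tear down everything set up by udl_driver_load(). */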
int udl_driver_unload(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;

	drm_vblank_cleanup(dev);

	if (udl->urbs.count)
		udl_free_urb_list(dev);

	udl_fbdev_cleanup(dev);
	udl_modeset_cleanup(dev);
	kfree(udl);
	return 0;
}