/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 */
19
Arnd Bergmann9720b4b2011-03-02 00:13:05 +010020#include <linux/kthread.h>
matt mooney7aaacb42011-05-11 22:33:43 -070021#include <linux/socket.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090022
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060023#include "usbip_common.h"
24#include "stub.h"
25
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060026static void stub_free_priv_and_urb(struct stub_priv *priv)
27{
28 struct urb *urb = priv->urb;
29
30 kfree(urb->setup_packet);
Michael Grzeschik19adf932017-05-22 13:02:44 +020031 urb->setup_packet = NULL;
32
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060033 kfree(urb->transfer_buffer);
Michael Grzeschik19adf932017-05-22 13:02:44 +020034 urb->transfer_buffer = NULL;
35
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060036 list_del(&priv->list);
37 kmem_cache_free(stub_priv_cache, priv);
38 usb_free_urb(urb);
39}
40
41/* be in spin_lock_irqsave(&sdev->priv_lock, flags) */
42void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
43 __u32 status)
44{
45 struct stub_unlink *unlink;
46
47 unlink = kzalloc(sizeof(struct stub_unlink), GFP_ATOMIC);
48 if (!unlink) {
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060049 usbip_event_add(&sdev->ud, VDEV_EVENT_ERROR_MALLOC);
50 return;
51 }
52
53 unlink->seqnum = seqnum;
54 unlink->status = status;
55
56 list_add_tail(&unlink->list, &sdev->unlink_tx);
57}
58
59/**
60 * stub_complete - completion handler of a usbip urb
61 * @urb: pointer to the urb completed
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060062 *
63 * When a urb has completed, the USB core driver calls this function mostly in
64 * the interrupt context. To return the result of a urb, the completed urb is
65 * linked to the pending list of returning.
66 *
67 */
68void stub_complete(struct urb *urb)
69{
70 struct stub_priv *priv = (struct stub_priv *) urb->context;
71 struct stub_device *sdev = priv->sdev;
72 unsigned long flags;
73
Brian G. Merrellb8868e42009-07-21 00:46:13 -060074 usbip_dbg_stub_tx("complete! status %d\n", urb->status);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060075
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060076 switch (urb->status) {
77 case 0:
78 /* OK */
79 break;
80 case -ENOENT:
Cédric Cabessa6bb3ee62014-03-19 23:04:56 +010081 dev_info(&urb->dev->dev,
82 "stopped by a call to usb_kill_urb() because of cleaning up a virtual connection\n");
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060083 return;
84 case -ECONNRESET:
Cédric Cabessa6bb3ee62014-03-19 23:04:56 +010085 dev_info(&urb->dev->dev,
86 "unlinked by a call to usb_unlink_urb()\n");
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060087 break;
88 case -EPIPE:
matt mooney1a4b6f62011-05-19 16:47:32 -070089 dev_info(&urb->dev->dev, "endpoint %d is stalled\n",
90 usb_pipeendpoint(urb->pipe));
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060091 break;
92 case -ESHUTDOWN:
matt mooney1a4b6f62011-05-19 16:47:32 -070093 dev_info(&urb->dev->dev, "device removed?\n");
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060094 break;
95 default:
Cédric Cabessa6bb3ee62014-03-19 23:04:56 +010096 dev_info(&urb->dev->dev,
97 "urb completion with non-zero status %d\n",
98 urb->status);
matt mooney49aecef2011-05-06 03:47:54 -070099 break;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600100 }
101
102 /* link a urb to the queue of tx. */
103 spin_lock_irqsave(&sdev->priv_lock, flags);
Nobuo Iwata6dc38da2016-04-27 15:35:53 +0900104 if (sdev->ud.tcp_socket == NULL) {
105 usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
106 /* It will be freed in stub_device_cleanup_urbs(). */
107 } else if (priv->unlinking) {
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600108 stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
109 stub_free_priv_and_urb(priv);
matt mooney87352762011-05-19 21:36:56 -0700110 } else {
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600111 list_move_tail(&priv->list, &sdev->priv_tx);
matt mooney87352762011-05-19 21:36:56 -0700112 }
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600113 spin_unlock_irqrestore(&sdev->priv_lock, flags);
114
115 /* wake up tx_thread */
116 wake_up(&sdev->tx_waitq);
117}
118
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600119static inline void setup_base_pdu(struct usbip_header_basic *base,
matt mooneyc6956c92011-05-06 03:47:44 -0700120 __u32 command, __u32 seqnum)
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600121{
matt mooney87352762011-05-19 21:36:56 -0700122 base->command = command;
123 base->seqnum = seqnum;
124 base->devid = 0;
125 base->ep = 0;
matt mooneyc6956c92011-05-06 03:47:44 -0700126 base->direction = 0;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600127}
128
129static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urb *urb)
130{
131 struct stub_priv *priv = (struct stub_priv *) urb->context;
132
133 setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, priv->seqnum);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600134 usbip_pack_pdu(rpdu, urb, USBIP_RET_SUBMIT, 1);
135}
136
137static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
matt mooneyc6956c92011-05-06 03:47:44 -0700138 struct stub_unlink *unlink)
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600139{
140 setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600141 rpdu->u.ret_unlink.status = unlink->status;
142}
143
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600144static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev)
145{
146 unsigned long flags;
147 struct stub_priv *priv, *tmp;
148
149 spin_lock_irqsave(&sdev->priv_lock, flags);
150
151 list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) {
152 list_move_tail(&priv->list, &sdev->priv_free);
153 spin_unlock_irqrestore(&sdev->priv_lock, flags);
154 return priv;
155 }
156
157 spin_unlock_irqrestore(&sdev->priv_lock, flags);
158
159 return NULL;
160}
161
162static int stub_send_ret_submit(struct stub_device *sdev)
163{
164 unsigned long flags;
165 struct stub_priv *priv, *tmp;
166
167 struct msghdr msg;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600168 size_t txsize;
169
170 size_t total_size = 0;
171
172 while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
173 int ret;
174 struct urb *urb = priv->urb;
175 struct usbip_header pdu_header;
Bart Westgeest36ac9b02012-10-10 13:34:25 -0400176 struct usbip_iso_packet_descriptor *iso_buffer = NULL;
Arjan Mels28276a22011-04-05 20:26:59 +0200177 struct kvec *iov = NULL;
178 int iovnum = 0;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600179
180 txsize = 0;
181 memset(&pdu_header, 0, sizeof(pdu_header));
182 memset(&msg, 0, sizeof(msg));
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600183
Arjan Mels28276a22011-04-05 20:26:59 +0200184 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
185 iovnum = 2 + urb->number_of_packets;
186 else
187 iovnum = 2;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600188
Djordje Zekovic6baf1392014-05-23 14:18:03 +0000189 iov = kcalloc(iovnum, sizeof(struct kvec), GFP_KERNEL);
Arjan Mels28276a22011-04-05 20:26:59 +0200190
191 if (!iov) {
192 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
193 return -1;
194 }
195
196 iovnum = 0;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600197
198 /* 1. setup usbip_header */
199 setup_ret_submit_pdu(&pdu_header, urb);
Arjan Mels28276a22011-04-05 20:26:59 +0200200 usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
matt mooneyc6956c92011-05-06 03:47:44 -0700201 pdu_header.base.seqnum, urb);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600202 usbip_header_correct_endian(&pdu_header, 1);
203
Arjan Mels28276a22011-04-05 20:26:59 +0200204 iov[iovnum].iov_base = &pdu_header;
205 iov[iovnum].iov_len = sizeof(pdu_header);
206 iovnum++;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600207 txsize += sizeof(pdu_header);
208
209 /* 2. setup transfer buffer */
Arjan Mels28276a22011-04-05 20:26:59 +0200210 if (usb_pipein(urb->pipe) &&
matt mooneyc6956c92011-05-06 03:47:44 -0700211 usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
212 urb->actual_length > 0) {
Arjan Mels28276a22011-04-05 20:26:59 +0200213 iov[iovnum].iov_base = urb->transfer_buffer;
214 iov[iovnum].iov_len = urb->actual_length;
215 iovnum++;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600216 txsize += urb->actual_length;
Arjan Mels28276a22011-04-05 20:26:59 +0200217 } else if (usb_pipein(urb->pipe) &&
matt mooneyc6956c92011-05-06 03:47:44 -0700218 usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
Arjan Mels28276a22011-04-05 20:26:59 +0200219 /*
220 * For isochronous packets: actual length is the sum of
221 * the actual length of the individual, packets, but as
222 * the packet offsets are not changed there will be
223 * padding between the packets. To optimally use the
224 * bandwidth the padding is not transmitted.
225 */
226
227 int i;
Pawel Lebioda3eed8c02014-05-14 19:20:27 +0200228
Arjan Mels28276a22011-04-05 20:26:59 +0200229 for (i = 0; i < urb->number_of_packets; i++) {
matt mooneyc6956c92011-05-06 03:47:44 -0700230 iov[iovnum].iov_base = urb->transfer_buffer +
231 urb->iso_frame_desc[i].offset;
232 iov[iovnum].iov_len =
233 urb->iso_frame_desc[i].actual_length;
Arjan Mels28276a22011-04-05 20:26:59 +0200234 iovnum++;
235 txsize += urb->iso_frame_desc[i].actual_length;
236 }
237
238 if (txsize != sizeof(pdu_header) + urb->actual_length) {
Alexander Popov8c7003a32016-04-28 13:07:22 +0300239 dev_err(&sdev->udev->dev,
Cédric Cabessa6bb3ee62014-03-19 23:04:56 +0100240 "actual length of urb %d does not match iso packet sizes %zu\n",
matt mooneyc6956c92011-05-06 03:47:44 -0700241 urb->actual_length,
242 txsize-sizeof(pdu_header));
Arjan Mels28276a22011-04-05 20:26:59 +0200243 kfree(iov);
matt mooneyc6956c92011-05-06 03:47:44 -0700244 usbip_event_add(&sdev->ud,
245 SDEV_EVENT_ERROR_TCP);
Arjan Mels28276a22011-04-05 20:26:59 +0200246 return -1;
247 }
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600248 }
249
250 /* 3. setup iso_packet_descriptor */
251 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
252 ssize_t len = 0;
253
254 iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
255 if (!iso_buffer) {
256 usbip_event_add(&sdev->ud,
257 SDEV_EVENT_ERROR_MALLOC);
Arjan Mels28276a22011-04-05 20:26:59 +0200258 kfree(iov);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600259 return -1;
260 }
261
Arjan Mels28276a22011-04-05 20:26:59 +0200262 iov[iovnum].iov_base = iso_buffer;
263 iov[iovnum].iov_len = len;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600264 txsize += len;
Arjan Mels28276a22011-04-05 20:26:59 +0200265 iovnum++;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600266 }
267
Arjan Mels28276a22011-04-05 20:26:59 +0200268 ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
269 iov, iovnum, txsize);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600270 if (ret != txsize) {
Alexander Popov8c7003a32016-04-28 13:07:22 +0300271 dev_err(&sdev->udev->dev,
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600272 "sendmsg failed!, retval %d for %zd\n",
273 ret, txsize);
Arjan Mels28276a22011-04-05 20:26:59 +0200274 kfree(iov);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600275 kfree(iso_buffer);
276 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
277 return -1;
278 }
279
Arjan Mels28276a22011-04-05 20:26:59 +0200280 kfree(iov);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600281 kfree(iso_buffer);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600282
283 total_size += txsize;
284 }
285
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600286 spin_lock_irqsave(&sdev->priv_lock, flags);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600287 list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) {
288 stub_free_priv_and_urb(priv);
289 }
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600290 spin_unlock_irqrestore(&sdev->priv_lock, flags);
291
292 return total_size;
293}
294
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600295static struct stub_unlink *dequeue_from_unlink_tx(struct stub_device *sdev)
296{
297 unsigned long flags;
298 struct stub_unlink *unlink, *tmp;
299
300 spin_lock_irqsave(&sdev->priv_lock, flags);
301
302 list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) {
303 list_move_tail(&unlink->list, &sdev->unlink_free);
304 spin_unlock_irqrestore(&sdev->priv_lock, flags);
305 return unlink;
306 }
307
308 spin_unlock_irqrestore(&sdev->priv_lock, flags);
309
310 return NULL;
311}
312
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600313static int stub_send_ret_unlink(struct stub_device *sdev)
314{
315 unsigned long flags;
316 struct stub_unlink *unlink, *tmp;
317
318 struct msghdr msg;
319 struct kvec iov[1];
320 size_t txsize;
321
322 size_t total_size = 0;
323
324 while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) {
325 int ret;
326 struct usbip_header pdu_header;
327
328 txsize = 0;
329 memset(&pdu_header, 0, sizeof(pdu_header));
330 memset(&msg, 0, sizeof(msg));
331 memset(&iov, 0, sizeof(iov));
332
Brian G. Merrellb8868e42009-07-21 00:46:13 -0600333 usbip_dbg_stub_tx("setup ret unlink %lu\n", unlink->seqnum);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600334
335 /* 1. setup usbip_header */
336 setup_ret_unlink_pdu(&pdu_header, unlink);
337 usbip_header_correct_endian(&pdu_header, 1);
338
339 iov[0].iov_base = &pdu_header;
340 iov[0].iov_len = sizeof(pdu_header);
341 txsize += sizeof(pdu_header);
342
343 ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
344 1, txsize);
345 if (ret != txsize) {
Alexander Popov8c7003a32016-04-28 13:07:22 +0300346 dev_err(&sdev->udev->dev,
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600347 "sendmsg failed!, retval %d for %zd\n",
348 ret, txsize);
349 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
350 return -1;
351 }
352
Brian G. Merrellb8868e42009-07-21 00:46:13 -0600353 usbip_dbg_stub_tx("send txdata\n");
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600354 total_size += txsize;
355 }
356
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600357 spin_lock_irqsave(&sdev->priv_lock, flags);
358
359 list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) {
360 list_del(&unlink->list);
361 kfree(unlink);
362 }
363
364 spin_unlock_irqrestore(&sdev->priv_lock, flags);
365
366 return total_size;
367}
368
Arnd Bergmann9720b4b2011-03-02 00:13:05 +0100369int stub_tx_loop(void *data)
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600370{
Arnd Bergmann9720b4b2011-03-02 00:13:05 +0100371 struct usbip_device *ud = data;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600372 struct stub_device *sdev = container_of(ud, struct stub_device, ud);
373
Arnd Bergmann9720b4b2011-03-02 00:13:05 +0100374 while (!kthread_should_stop()) {
Brian G. Merrellb8868e42009-07-21 00:46:13 -0600375 if (usbip_event_happened(ud))
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600376 break;
377
378 /*
379 * send_ret_submit comes earlier than send_ret_unlink. stub_rx
380 * looks at only priv_init queue. If the completion of a URB is
381 * earlier than the receive of CMD_UNLINK, priv is moved to
382 * priv_tx queue and stub_rx does not find the target priv. In
383 * this case, vhci_rx receives the result of the submit request
384 * and then receives the result of the unlink request. The
385 * result of the submit is given back to the usbcore as the
386 * completion of the unlink request. The request of the
387 * unlink is ignored. This is ok because a driver who calls
388 * usb_unlink_urb() understands the unlink was too late by
389 * getting the status of the given-backed URB which has the
390 * status of usb_submit_urb().
391 */
392 if (stub_send_ret_submit(sdev) < 0)
393 break;
394
395 if (stub_send_ret_unlink(sdev) < 0)
396 break;
397
398 wait_event_interruptible(sdev->tx_waitq,
matt mooneyc6956c92011-05-06 03:47:44 -0700399 (!list_empty(&sdev->priv_tx) ||
400 !list_empty(&sdev->unlink_tx) ||
401 kthread_should_stop()));
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600402 }
Arnd Bergmann9720b4b2011-03-02 00:13:05 +0100403
404 return 0;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600405}