blob: 777a4058c407beee5abdacc72bd7c8c7644c3c00 [file] [log] [blame]
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -06001/*
2 * Copyright (C) 2003-2008 Takahiro Hirofuchi
3 *
4 * This is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
17 * USA.
18 */
19
matt mooney7aaacb42011-05-11 22:33:43 -070020#include <asm/byteorder.h>
Arnd Bergmann9720b4b2011-03-02 00:13:05 +010021#include <linux/kthread.h>
matt mooney7aaacb42011-05-11 22:33:43 -070022#include <linux/usb.h>
23#include <linux/usb/hcd.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090024
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060025#include "usbip_common.h"
26#include "stub.h"
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060027
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060028static int is_clear_halt_cmd(struct urb *urb)
29{
30 struct usb_ctrlrequest *req;
31
32 req = (struct usb_ctrlrequest *) urb->setup_packet;
33
34 return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
35 (req->bRequestType == USB_RECIP_ENDPOINT) &&
36 (req->wValue == USB_ENDPOINT_HALT);
37}
38
39static int is_set_interface_cmd(struct urb *urb)
40{
41 struct usb_ctrlrequest *req;
42
43 req = (struct usb_ctrlrequest *) urb->setup_packet;
44
45 return (req->bRequest == USB_REQ_SET_INTERFACE) &&
matt mooney64f338e2011-05-06 03:47:43 -070046 (req->bRequestType == USB_RECIP_INTERFACE);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060047}
48
49static int is_set_configuration_cmd(struct urb *urb)
50{
51 struct usb_ctrlrequest *req;
52
53 req = (struct usb_ctrlrequest *) urb->setup_packet;
54
55 return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
matt mooney64f338e2011-05-06 03:47:43 -070056 (req->bRequestType == USB_RECIP_DEVICE);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060057}
58
59static int is_reset_device_cmd(struct urb *urb)
60{
61 struct usb_ctrlrequest *req;
62 __u16 value;
63 __u16 index;
64
65 req = (struct usb_ctrlrequest *) urb->setup_packet;
66 value = le16_to_cpu(req->wValue);
67 index = le16_to_cpu(req->wIndex);
68
69 if ((req->bRequest == USB_REQ_SET_FEATURE) &&
matt mooney64f338e2011-05-06 03:47:43 -070070 (req->bRequestType == USB_RT_PORT) &&
71 (value == USB_PORT_FEAT_RESET)) {
Brian G. Merrellb8868e42009-07-21 00:46:13 -060072 usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -060073 return 1;
74 } else
75 return 0;
76}
77
78static int tweak_clear_halt_cmd(struct urb *urb)
79{
80 struct usb_ctrlrequest *req;
81 int target_endp;
82 int target_dir;
83 int target_pipe;
84 int ret;
85
86 req = (struct usb_ctrlrequest *) urb->setup_packet;
87
88 /*
89 * The stalled endpoint is specified in the wIndex value. The endpoint
90 * of the urb is the target of this clear_halt request (i.e., control
91 * endpoint).
92 */
93 target_endp = le16_to_cpu(req->wIndex) & 0x000f;
94
95 /* the stalled endpoint direction is IN or OUT?. USB_DIR_IN is 0x80. */
96 target_dir = le16_to_cpu(req->wIndex) & 0x0080;
97
98 if (target_dir)
99 target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
100 else
101 target_pipe = usb_sndctrlpipe(urb->dev, target_endp);
102
103 ret = usb_clear_halt(urb->dev, target_pipe);
104 if (ret < 0)
Himangi Saraogi5fb44712013-11-02 17:31:22 +0530105 dev_err(&urb->dev->dev,
106 "usb_clear_halt error: devnum %d endp %d ret %d\n",
107 urb->dev->devnum, target_endp, ret);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600108 else
Himangi Saraogi5fb44712013-11-02 17:31:22 +0530109 dev_info(&urb->dev->dev,
110 "usb_clear_halt done: devnum %d endp %d\n",
111 urb->dev->devnum, target_endp);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600112
113 return ret;
114}
115
116static int tweak_set_interface_cmd(struct urb *urb)
117{
118 struct usb_ctrlrequest *req;
119 __u16 alternate;
120 __u16 interface;
121 int ret;
122
123 req = (struct usb_ctrlrequest *) urb->setup_packet;
124 alternate = le16_to_cpu(req->wValue);
125 interface = le16_to_cpu(req->wIndex);
126
matt mooney64f338e2011-05-06 03:47:43 -0700127 usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
128 interface, alternate);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600129
130 ret = usb_set_interface(urb->dev, interface, alternate);
131 if (ret < 0)
Himangi Saraogi5fb44712013-11-02 17:31:22 +0530132 dev_err(&urb->dev->dev,
133 "usb_set_interface error: inf %u alt %u ret %d\n",
134 interface, alternate, ret);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600135 else
Himangi Saraogi5fb44712013-11-02 17:31:22 +0530136 dev_info(&urb->dev->dev,
137 "usb_set_interface done: inf %u alt %u\n",
138 interface, alternate);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600139
140 return ret;
141}
142
143static int tweak_set_configuration_cmd(struct urb *urb)
144{
Valentina Manea2c8c9812014-03-08 14:53:32 +0200145 struct stub_priv *priv = (struct stub_priv *) urb->context;
146 struct stub_device *sdev = priv->sdev;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600147 struct usb_ctrlrequest *req;
148 __u16 config;
Valentina Manea2c8c9812014-03-08 14:53:32 +0200149 int err;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600150
151 req = (struct usb_ctrlrequest *) urb->setup_packet;
152 config = le16_to_cpu(req->wValue);
153
Valentina Manea2c8c9812014-03-08 14:53:32 +0200154 err = usb_set_configuration(sdev->udev, config);
155 if (err && err != -ENODEV)
156 dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
157 config, err);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600158 return 0;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600159}
160
/*
 * Handle a client-initiated port reset by resetting the exported device
 * locally. Note the reset runs synchronously in the rx path; if the lock
 * cannot be taken the reset is silently skipped (only logged).
 *
 * Always returns 0.
 */
static int tweak_reset_device_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;

	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

	/* usb_lock_device_for_reset() may fail, e.g. while disconnecting */
	if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
		return 0;
	}
	usb_reset_device(sdev->udev);
	usb_unlock_device(sdev->udev);

	return 0;
}
177
178/*
179 * clear_halt, set_interface, and set_configuration require special tricks.
180 */
static void tweak_special_requests(struct urb *urb)
{
	/* only control transfers with a setup packet can need tweaking */
	if (!urb || !urb->setup_packet)
		return;

	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
		return;

	/*
	 * Dispatch to at most one tweak handler. Note the caller still
	 * submits the urb afterwards (see stub_recv_cmd_submit), so the
	 * request reaches the device twice in the tweaked cases.
	 */
	if (is_clear_halt_cmd(urb))
		/* tweak clear_halt */
		tweak_clear_halt_cmd(urb);

	else if (is_set_interface_cmd(urb))
		/* tweak set_interface */
		tweak_set_interface_cmd(urb);

	else if (is_set_configuration_cmd(urb))
		/* tweak set_configuration */
		tweak_set_configuration_cmd(urb);

	else if (is_reset_device_cmd(urb))
		tweak_reset_device_cmd(urb);
	else
		usbip_dbg_stub_rx("no need to tweak\n");
}
206
207/*
208 * stub_recv_unlink() unlinks the URB by a call to usb_unlink_urb().
209 * By unlinking the urb asynchronously, stub_rx can continuously
210 * process coming urbs. Even if the urb is unlinked, its completion
211 * handler will be called and stub_tx will send a return pdu.
212 *
213 * See also comments about unlinking strategy in vhci_hcd.c.
214 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
				struct usbip_header *pdu)
{
	int ret;
	unsigned long flags;
	struct stub_priv *priv;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	/* look for the in-flight urb matching the unlink target's seqnum */
	list_for_each_entry(priv, &sdev->priv_init, list) {
		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
			continue;

		/*
		 * This matched urb is not completed yet (i.e., be in
		 * flight in usb hcd hardware/driver). Now we are
		 * cancelling it. The unlinking flag means that we are
		 * now not going to return the normal result pdu of a
		 * submission request, but going to return a result pdu
		 * of the unlink request.
		 */
		priv->unlinking = 1;

		/*
		 * In the case that unlinking flag is on, prev->seqnum
		 * is changed from the seqnum of the cancelling urb to
		 * the seqnum of the unlink request. This will be used
		 * to make the result pdu of the unlink request.
		 */
		priv->seqnum = pdu->base.seqnum;

		/* drop the lock BEFORE usb_unlink_urb(); see below */
		spin_unlock_irqrestore(&sdev->priv_lock, flags);

		/*
		 * usb_unlink_urb() is now out of spinlocking to avoid
		 * spinlock recursion since stub_complete() is
		 * sometimes called in this context but not in the
		 * interrupt context. If stub_complete() is executed
		 * before we call usb_unlink_urb(), usb_unlink_urb()
		 * will return an error value. In this case, stub_tx
		 * will return the result pdu of this unlink request
		 * though submission is completed and actual unlinking
		 * is not executed. OK?
		 */
		/* In the above case, urb->status is not -ECONNRESET,
		 * so a driver in a client host will know the failure
		 * of the unlink request ?
		 */
		ret = usb_unlink_urb(priv->urb);
		if (ret != -EINPROGRESS)
			dev_err(&priv->urb->dev->dev,
				"failed to unlink a urb # %lu, ret %d\n",
				priv->seqnum, ret);

		/* unlink result is reported via the completion handler */
		return 0;
	}

	usbip_dbg_stub_rx("seqnum %d is not pending\n",
			  pdu->u.cmd_unlink.seqnum);

	/*
	 * The urb of the unlink target is not found in priv_init queue. It was
	 * already completed and its results is/was going to be sent by a
	 * CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only
	 * return the completeness of this unlink request to vhci_hcd.
	 */
	stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return 0;
}
287
288static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
289{
290 struct usbip_device *ud = &sdev->ud;
Márton Németh9ba422b2011-05-24 23:19:18 +0200291 int valid = 0;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600292
293 if (pdu->base.devid == sdev->devid) {
Harvey Yangdcf147792013-01-22 13:31:30 +0800294 spin_lock_irq(&ud->lock);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600295 if (ud->status == SDEV_ST_USED) {
296 /* A request is valid. */
Márton Németh9ba422b2011-05-24 23:19:18 +0200297 valid = 1;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600298 }
Harvey Yangdcf147792013-01-22 13:31:30 +0800299 spin_unlock_irq(&ud->lock);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600300 }
301
Márton Németh9ba422b2011-05-24 23:19:18 +0200302 return valid;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600303}
304
305static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
306 struct usbip_header *pdu)
307{
308 struct stub_priv *priv;
309 struct usbip_device *ud = &sdev->ud;
310 unsigned long flags;
311
312 spin_lock_irqsave(&sdev->priv_lock, flags);
313
Wei Yongjune68f2842009-02-06 11:08:58 +0800314 priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600315 if (!priv) {
Alexander Popov8c7003a32016-04-28 13:07:22 +0300316 dev_err(&sdev->udev->dev, "alloc stub_priv\n");
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600317 spin_unlock_irqrestore(&sdev->priv_lock, flags);
318 usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
319 return NULL;
320 }
321
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600322 priv->seqnum = pdu->base.seqnum;
323 priv->sdev = sdev;
324
325 /*
326 * After a stub_priv is linked to a list_head,
327 * our error handler can free allocated data.
328 */
329 list_add_tail(&priv->list, &sdev->priv_init);
330
331 spin_unlock_irqrestore(&sdev->priv_lock, flags);
332
333 return priv;
334}
335
/*
 * Translate the endpoint number/direction in @pdu into a usb pipe value
 * for the exported device, validating the pdu's submit fields on the way.
 *
 * Returns a pipe value on success, or -1 on any validation failure.
 */
static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usb_device *udev = sdev->udev;
	struct usb_host_endpoint *ep;
	struct usb_endpoint_descriptor *epd = NULL;
	int epnum = pdu->base.ep;
	int dir = pdu->base.direction;

	/* epnum comes from the wire; reject anything outside 0..15 */
	if (epnum < 0 || epnum > 15)
		goto err_ret;

	if (dir == USBIP_DIR_IN)
		ep = udev->ep_in[epnum & 0x7f];
	else
		ep = udev->ep_out[epnum & 0x7f];
	if (!ep)
		goto err_ret;

	epd = &ep->desc;

	/* validate transfer_buffer_length */
	/*
	 * NOTE(review): if transfer_buffer_length is a signed 32-bit field
	 * (cannot confirm its declaration from this file), the > INT_MAX
	 * comparison can never be true and a negative length would slip
	 * through here — verify against the usbip_header definition.
	 */
	if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
		dev_err(&sdev->udev->dev,
			"CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
			pdu->u.cmd_submit.transfer_buffer_length);
		return -1;
	}

	if (usb_endpoint_xfer_control(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndctrlpipe(udev, epnum);
		else
			return usb_rcvctrlpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_bulk(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndbulkpipe(udev, epnum);
		else
			return usb_rcvbulkpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_int(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndintpipe(udev, epnum);
		else
			return usb_rcvintpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_isoc(epd)) {
		/* validate number of packets */
		if (pdu->u.cmd_submit.number_of_packets < 0 ||
		    pdu->u.cmd_submit.number_of_packets >
		    USBIP_MAX_ISO_PACKETS) {
			dev_err(&sdev->udev->dev,
				"CMD_SUBMIT: isoc invalid num packets %d\n",
				pdu->u.cmd_submit.number_of_packets);
			return -1;
		}
		if (dir == USBIP_DIR_OUT)
			return usb_sndisocpipe(udev, epnum);
		else
			return usb_rcvisocpipe(udev, epnum);
	}

err_ret:
	/* NOT REACHED */
	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
	return -1;
}
406
/*
 * Strip transfer_flags the client is not allowed to set, keeping only the
 * flags valid for the urb's transfer type and direction. This appears
 * adapted from the flag-sanitizing checks in the USB core's submit path —
 * NOTE(review): confirm against usb_submit_urb() and keep in sync.
 */
static void masking_bogus_flags(struct urb *urb)
{
	int xfertype;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;
	unsigned int allowed;

	/* bail out on urbs we cannot (or need not) sanitize */
	if (!urb || urb->hcpriv || !urb->complete)
		return;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return;

	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
		[usb_pipeendpoint(urb->pipe)];
	if (!ep)
		return;

	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
			(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return;
		/* control direction comes from the setup packet, not the ep */
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
			!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
		   URB_DIR_MASK | URB_FREE_BUFFER);
	/* note: default sits between cases on purpose — fallthroughs reach it */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_CONTROL:
		allowed |= URB_NO_FSBR;	/* only affects UHCI */
		/* FALLTHROUGH */
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	urb->transfer_flags &= allowed;
}
460
/*
 * Build and submit a urb from a CMD_SUBMIT pdu.
 *
 * On allocation/receive failures this returns early after raising an
 * SDEV_EVENT_* error event; the partially built priv/urb stay linked on
 * sdev->priv_init — presumably the event/teardown machinery frees them
 * (NOTE(review): verify against the stub event handler).
 */
static void stub_recv_cmd_submit(struct stub_device *sdev,
				 struct usbip_header *pdu)
{
	int ret;
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	struct usb_device *udev = sdev->udev;
	int pipe = get_pipe(sdev, pdu);

	/* get_pipe() already logged the reason */
	if (pipe == -1)
		return;

	priv = stub_priv_alloc(sdev, pdu);
	if (!priv)
		return;

	/* setup a urb */
	if (usb_pipeisoc(pipe))
		priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
					  GFP_KERNEL);
	else
		priv->urb = usb_alloc_urb(0, GFP_KERNEL);

	if (!priv->urb) {
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	/* allocate urb transfer buffer, if needed */
	if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
	    pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
		priv->urb->transfer_buffer =
			kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
				GFP_KERNEL);
		if (!priv->urb->transfer_buffer) {
			usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
			return;
		}
	}

	/* copy urb setup packet (8 bytes: standard USB setup size) */
	priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
					  GFP_KERNEL);
	if (!priv->urb->setup_packet) {
		dev_err(&udev->dev, "allocate setup_packet\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	/* set other members from the base header of pdu */
	priv->urb->context = (void *) priv;
	priv->urb->dev = udev;
	priv->urb->pipe = pipe;
	priv->urb->complete = stub_complete;

	usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);


	/* pull OUT payload and iso descriptors off the socket */
	if (usbip_recv_xbuff(ud, priv->urb) < 0)
		return;

	if (usbip_recv_iso(ud, priv->urb) < 0)
		return;

	/* no need to submit an intercepted request, but harmless? */
	tweak_special_requests(priv->urb);

	masking_bogus_flags(priv->urb);
	/* urb is now ready to submit */
	ret = usb_submit_urb(priv->urb, GFP_KERNEL);

	if (ret == 0)
		usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
				  pdu->base.seqnum);
	else {
		dev_err(&udev->dev, "submit_urb error, %d\n", ret);
		usbip_dump_header(pdu);
		usbip_dump_urb(priv->urb);

		/*
		 * Pessimistic.
		 * This connection will be discarded.
		 */
		usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
	}

	usbip_dbg_stub_rx("Leave\n");
}
549
550/* recv a pdu */
551static void stub_rx_pdu(struct usbip_device *ud)
552{
553 int ret;
554 struct usbip_header pdu;
555 struct stub_device *sdev = container_of(ud, struct stub_device, ud);
Valentina Maneab7945b72014-01-23 23:12:29 +0200556 struct device *dev = &sdev->udev->dev;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600557
Brian G. Merrellb8868e42009-07-21 00:46:13 -0600558 usbip_dbg_stub_rx("Enter\n");
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600559
560 memset(&pdu, 0, sizeof(pdu));
561
Kurt Kanzenbach7717880742013-04-04 16:03:05 +0200562 /* receive a pdu header */
Bart Westgeest5a08c522011-12-19 17:44:11 -0500563 ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600564 if (ret != sizeof(pdu)) {
565 dev_err(dev, "recv a header, %d\n", ret);
566 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
567 return;
568 }
569
570 usbip_header_correct_endian(&pdu, 0);
571
Brian G. Merrellb8868e42009-07-21 00:46:13 -0600572 if (usbip_dbg_flag_stub_rx)
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600573 usbip_dump_header(&pdu);
574
575 if (!valid_request(sdev, &pdu)) {
576 dev_err(dev, "recv invalid request\n");
577 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
578 return;
579 }
580
581 switch (pdu.base.command) {
582 case USBIP_CMD_UNLINK:
583 stub_recv_cmd_unlink(sdev, &pdu);
584 break;
585
586 case USBIP_CMD_SUBMIT:
587 stub_recv_cmd_submit(sdev, &pdu);
588 break;
589
590 default:
591 /* NOTREACHED */
592 dev_err(dev, "unknown pdu\n");
593 usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
matt mooney49aecef2011-05-06 03:47:54 -0700594 break;
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600595 }
Takahiro Hirofuchi4d7b5c72008-07-09 14:56:51 -0600596}
597
/*
 * Kernel-thread entry point: keep receiving pdus until the thread is
 * asked to stop or an error event has been queued on the device.
 */
int stub_rx_loop(void *data)
{
	struct usbip_device *ud = data;

	while (!kthread_should_stop() && !usbip_event_happened(ud))
		stub_rx_pdu(ud);

	return 0;
}