/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>

#include "u_ether.h"

/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code, such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes.  Set the max size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling.
 */
#define GETHER_MAX_ETH_FRAME_LEN 15412

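/*
 * Shared workqueue used to push received-frame processing out of USB
 * completion (interrupt) context: rx_complete() queues dev->rx_work
 * here and process_rx_w() drains it.
 */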
static struct workqueue_struct *uether_wq;

struct eth_dev {
	/* lock is held while accessing port_usb */
	spinlock_t lock;
	struct gether *port_usb;

	struct net_device *net;
	struct usb_gadget *gadget;

	spinlock_t req_lock;	/* guard {rx,tx}_reqs */
	struct list_head tx_reqs, rx_reqs;
	unsigned tx_qlen;
/* Minimum number of TX USB requests queued to the UDC */
#define TX_REQ_THRESHOLD 5
	int no_tx_req_used;
	int tx_skb_hold_count;
	u32 tx_req_bufsize;

	struct sk_buff_head rx_frames;

	unsigned qmult;

	unsigned header_len;
	unsigned ul_max_pkts_per_xfer;
	unsigned dl_max_pkts_per_xfer;
	struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb);
	int (*unwrap)(struct gether *,
			struct sk_buff *skb,
			struct sk_buff_head *list);

	struct work_struct work;
	struct work_struct rx_work;

	unsigned long todo;
	unsigned long flags;	/* RMNET_MODE_LLP_* link framing mode */
	unsigned short rx_needed_headroom;
#define WORK_RX_MEMORY 0

	bool zlp;
	bool no_skb_reserve;
	u8 host_mac[ETH_ALEN];
	u8 dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	if (new_mtu <= ETH_HLEN || new_mtu > GETHER_MAX_ETH_FRAME_LEN)
		return -ERANGE;
	net->mtu = new_mtu;

	return 0;
}

static int ueth_change_mtu_ip(struct net_device *net, int new_mtu)
{
	struct eth_dev *dev = netdev_priv(net);
	unsigned long flags;
	int old_mtu = net->mtu;
	int status = 0;

	spin_lock_irqsave(&dev->lock, flags);
	if (new_mtu <= 0)
		status = -EINVAL;
	else
		net->mtu = new_mtu;

	/* log the MTU captured before the update, not the new value */
	DBG(dev, "[%s] MTU change: old=%d new=%d\n", net->name,
					old_mtu, new_mtu);
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

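/*
 * rx_submit - allocate an skb sized for one (possibly aggregated) OUT
 * transfer and queue it on the OUT endpoint.  On allocation or queue
 * failure, the WORK_RX_MEMORY kevent is scheduled so eth_work() can
 * retry the fill later from process context.
 */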
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff *skb;
	int retval = -ENOMEM;
	size_t size = 0;
	struct usb_ep *out;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->ul_max_pkts_per_xfer)
		size *= dev->ul_max_pkts_per_xfer;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	DBG(dev, "%s: size: %zd\n", __func__, size);
	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff *skb = req->context;
	struct eth_dev *dev = ep->driver_data;
	int status = req->status;
	bool queue = false;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
				if (status == -EINVAL)
					dev->net->stats.rx_errors++;
				else if (status == -EOVERFLOW)
					dev->net->stats.rx_over_errors++;
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		if (!status)
			queue = true;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		queue = true;
		dev_kfree_skb_any(skb);
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

clean:
	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->rx_reqs);
	spin_unlock(&dev->req_lock);

	if (queue)
		queue_work(uether_wq, &dev->rx_work);
}

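/*
 * prealloc - make sure @list holds exactly @n requests for @ep,
 * allocating or freeing entries as needed.  Returns -ENOMEM only when
 * nothing at all could be allocated, so a partially filled queue can
 * still make progress.
 */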
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned i;
	struct usb_request *req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head *next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

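/*
 * alloc_requests - populate the tx and rx request freelists for
 * whichever endpoints this link actually has; called from
 * gether_connect() with interrupts blocked.
 */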
static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int status = 0;

	spin_lock(&dev->req_lock);
	if (link->in_ep) {
		status = prealloc(&dev->tx_reqs, link->in_ep, n);
		if (status < 0)
			goto fail;
	}

	if (link->out_ep) {
		status = prealloc(&dev->rx_reqs, link->out_ep, n);
		if (status < 0)
			goto fail;
	}
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request *req;
	unsigned long flags;
	int req_cnt = 0;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		/* break the nexus of continuous completion and re-submission */
		if (++req_cnt > qlen(dev->gadget, dev->qmult))
			break;

		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			spin_lock_irqsave(&dev->req_lock, flags);
			list_add(&req->list, &dev->rx_reqs);
			spin_unlock_irqrestore(&dev->req_lock, flags);
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

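/*
 * process_rx_w - rx workqueue handler: drain dev->rx_frames, drop
 * frames with bogus lengths, hand good ones to the stack via
 * netif_rx_ni(), then refill the rx queue.  Runs in process context,
 * so GFP_KERNEL is safe here.
 */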
static void process_rx_w(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff *skb;
	int status = 0;

	if (!dev->port_usb)
		return;

	while ((skb = skb_dequeue(&dev->rx_frames))) {
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			dev_kfree_skb_any(skb);
			continue;
		}
		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

		status = netif_rx_ni(skb);
	}

	if (netif_running(dev->net))
		rx_fill(dev, GFP_KERNEL);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

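/*
 * tx_complete - IN request completion handler.  In multi-packet
 * (aggregated) mode it tries to immediately requeue the next request
 * that already holds buffered packets, so the IN pipe stays busy
 * without waiting for another eth_start_xmit() call.
 */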
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff *skb = req->context;
	struct eth_dev *dev = ep->driver_data;
	struct net_device *net = dev->net;
	struct usb_request *new_req;
	struct usb_ep *in;
	int length;
	int retval;

	if (!dev->port_usb) {
		usb_ep_free_request(ep, req);
		return;
	}

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		if (!req->zero)
			dev->net->stats.tx_bytes += req->length - 1;
		else
			dev->net->stats.tx_bytes += req->length;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add_tail(&req->list, &dev->tx_reqs);

	if (dev->port_usb->multi_pkt_xfer) {
		dev->no_tx_req_used--;
		req->length = 0;
		in = dev->port_usb->in_ep;

		if (!list_empty(&dev->tx_reqs)) {
			new_req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
			list_del(&new_req->list);
			spin_unlock(&dev->req_lock);
			if (new_req->length > 0) {
				length = new_req->length;

				/* NCM requires no zlp if transfer is
				 * dwNtbInMaxSize
				 */
				if (dev->port_usb->is_fixed &&
					length == dev->port_usb->fixed_in_len &&
					(length % in->maxpacket) == 0)
					new_req->zero = 0;
				else
					new_req->zero = 1;

				/* use zlp framing on tx for strict CDC-Ether
				 * conformance, though any robust network rx
				 * path ignores extra padding.  and some
				 * hardware doesn't like to write zlps.
				 */
				if (new_req->zero && !dev->zlp &&
						(length % in->maxpacket) == 0) {
					new_req->zero = 0;
					length++;
				}

				new_req->length = length;
				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
				switch (retval) {
				default:
					DBG(dev, "tx queue err %d\n", retval);
					new_req->length = 0;
					spin_lock(&dev->req_lock);
					list_add_tail(&new_req->list,
							&dev->tx_reqs);
					spin_unlock(&dev->req_lock);
					break;
				case 0:
					spin_lock(&dev->req_lock);
					dev->no_tx_req_used++;
					spin_unlock(&dev->req_lock);
					netif_trans_update(net);
				}
			} else {
				spin_lock(&dev->req_lock);
				/*
				 * Put the idle request at the back of the
				 * queue.  The xmit function will put the
				 * unfinished request at the beginning of the
				 * queue.
				 */
				list_add_tail(&new_req->list, &dev->tx_reqs);
				spin_unlock(&dev->req_lock);
			}
		} else {
			spin_unlock(&dev->req_lock);
		}
	} else {
		spin_unlock(&dev->req_lock);
		dev_kfree_skb_any(skb);
	}

	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

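/*
 * Size each tx request buffer for one full aggregated transfer:
 * dl_max_pkts_per_xfer packets of (MTU + Ethernet header), plus
 * per-packet framing headroom (44 bytes for the RNDIS packet message
 * header and 22 bytes of extra slack).
 */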
static int alloc_tx_buffer(struct eth_dev *dev)
{
	struct list_head *act;
	struct usb_request *req;

	dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
				(dev->net->mtu
				+ sizeof(struct ethhdr)
				/* size of rndis_packet_msg_type */
				+ 44
				+ 22));

	list_for_each(act, &dev->tx_reqs) {
		req = container_of(act, struct usb_request, list);
		if (!req->buf)
			req->buf = kmalloc(dev->tx_req_bufsize,
						GFP_ATOMIC);

		if (!req->buf)
			goto free_buf;
	}
	return 0;

free_buf:
	/* tx_req_bufsize = 0 retries mem alloc on next eth_start_xmit */
	dev->tx_req_bufsize = 0;
	list_for_each(act, &dev->tx_reqs) {
		req = container_of(act, struct usb_request, list);
		kfree(req->buf);
		req->buf = NULL;
	}
	return -ENOMEM;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev *dev = netdev_priv(net);
	int length = 0;
	int retval;
	struct usb_request *req = NULL;
	unsigned long flags;
	struct usb_ep *in;
	u16 cdc_filter;
	bool multi_pkt_xfer = false;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
		multi_pkt_xfer = dev->port_usb->multi_pkt_xfer;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (skb && !in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Allocate memory for tx_reqs to support multi packet transfer */
	if (multi_pkt_xfer && !dev->tx_req_bufsize) {
		retval = alloc_tx_buffer(dev);
		if (retval < 0)
			return -ENOMEM;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8 *dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16 type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it,
	 * or the hardware can't use skb buffers,
	 * or there's not enough space for the extra headers we need
	 */
	if (dev->wrap) {
		unsigned long flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	dev->tx_skb_hold_count++;
	spin_unlock_irqrestore(&dev->req_lock, flags);

	if (multi_pkt_xfer) {
		memcpy(req->buf + req->length, skb->data, skb->len);
		req->length = req->length + skb->len;
		length = req->length;
		dev_kfree_skb_any(skb);

		spin_lock_irqsave(&dev->req_lock, flags);
		if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
			if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
				list_add(&req->list, &dev->tx_reqs);
				spin_unlock_irqrestore(&dev->req_lock, flags);
				goto success;
			}
		}

		dev->no_tx_req_used++;
		spin_unlock_irqrestore(&dev->req_lock, flags);

		spin_lock_irqsave(&dev->lock, flags);
		dev->tx_skb_hold_count = 0;
		spin_unlock_irqrestore(&dev->lock, flags);
	} else {
		length = skb->len;
		req->buf = skb->data;
		req->context = skb;
	}

	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
			dev->port_usb->is_fixed &&
			length == dev->port_usb->fixed_in_len &&
			(length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
		req->zero = 0;
		length++;
	}

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget) &&
			(dev->gadget->speed == USB_SPEED_HIGH)) {
		dev->tx_qlen++;
		if (dev->tx_qlen == (dev->qmult / 2)) {
			req->no_interrupt = 0;
			dev->tx_qlen = 0;
		} else {
			req->no_interrupt = 1;
		}
	} else {
		req->no_interrupt = 0;
	}

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
	}

	if (retval) {
		if (!multi_pkt_xfer)
			dev_kfree_skb_any(skb);
		else
			req->length = 0;
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
success:
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	dev->tx_qlen = 0;
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev *dev = netdev_priv(net);
	struct gether *link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev *dev = netdev_priv(net);
	unsigned long flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether *link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		if (link->in_ep) {
			in = link->in_ep->desc;
			usb_ep_disable(link->in_ep);
			if (netif_carrier_ok(net)) {
				DBG(dev, "host still using in endpoints\n");
				link->in_ep->desc = in;
				usb_ep_enable(link->in_ep);
			}
		}

		if (link->out_ep) {
			out = link->out_ep->desc;
			usb_ep_disable(link->out_ep);
			if (netif_carrier_ok(net)) {
				DBG(dev, "host still using out endpoints\n");
				link->out_ep->desc = out;
				usb_ep_enable(link->out_ep);
			}
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

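/*
 * get_ether_addr - parse a colon- or dot-separated MAC address string
 * into @dev_addr.  Falls back to a random address (and returns 1) when
 * @str is missing or doesn't parse to a valid unicast address.
 */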
static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%pM", dev_addr);
	return 18;
}

static int ether_ioctl(struct net_device *, struct ifreq *, int);

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_do_ioctl		= ether_ioctl,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static const struct net_device_ops eth_netdev_ops_ip = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_do_ioctl		= ether_ioctl,
	.ndo_change_mtu		= ueth_change_mtu_ip,
	.ndo_set_mac_address	= NULL,
	.ndo_validate_addr	= NULL,
};

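/*
 * Handle RMNET_IOCTL_EXTENDED sub-commands (feature query, MRU get/set,
 * driver name); the rmnet_ioctl_extended_s block is copied from and
 * back to userspace via ifr->ifr_ifru.ifru_data.
 */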
static int rmnet_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
{
	struct rmnet_ioctl_extended_s ext_cmd;
	struct eth_dev *eth_dev = netdev_priv(dev);
	int rc = 0;

	rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data,
			    sizeof(struct rmnet_ioctl_extended_s));

	if (rc) {
		DBG(eth_dev, "%s(): copy_from_user() failed\n", __func__);
		return rc;
	}

	switch (ext_cmd.extended_ioctl) {
	case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
		ext_cmd.u.data = 0;
		break;

	case RMNET_IOCTL_SET_MRU:
		if (netif_running(dev))
			return -EBUSY;

		/* 16K max */
		if ((size_t)ext_cmd.u.data > 0x4000)
			return -EINVAL;

		if (eth_dev->port_usb) {
			eth_dev->port_usb->is_fixed = true;
			eth_dev->port_usb->fixed_out_len =
				(size_t) ext_cmd.u.data;
			DBG(eth_dev, "[%s] rmnet_ioctl(): SET MRU to %zu\n",
				dev->name, eth_dev->port_usb->fixed_out_len);
		} else {
			pr_err("[%s]: %s: SET MRU failed. Cable disconnected\n",
				dev->name, __func__);
			return -ENODEV;
		}
		break;

	case RMNET_IOCTL_GET_MRU:
		if (eth_dev->port_usb) {
			ext_cmd.u.data = eth_dev->port_usb->is_fixed ?
					eth_dev->port_usb->fixed_out_len :
					dev->mtu;
		} else {
			pr_err("[%s]: %s: GET MRU failed. Cable disconnected\n",
				dev->name, __func__);
			return -ENODEV;
		}
		break;

	case RMNET_IOCTL_GET_DRIVER_NAME:
		strlcpy(ext_cmd.u.if_name, dev->name,
			sizeof(ext_cmd.u.if_name));
		break;

	default:
		break;
	}

	rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd,
			  sizeof(struct rmnet_ioctl_extended_s));

	if (rc)
		DBG(eth_dev, "%s(): copy_to_user() failed\n", __func__);
	return rc;
}

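/*
 * ether_ioctl - rmnet control entry point: switches the link between
 * Ethernet and raw-IP framing (swapping netdev_ops accordingly) and
 * dispatches the extended ioctls above.
 */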
static int ether_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct eth_dev *eth_dev = netdev_priv(dev);
	void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
	int prev_mtu = dev->mtu;
	u32 state, old_opmode;
	int rc = -EFAULT;

	old_opmode = eth_dev->flags;
	/* Process IOCTL command */
	switch (cmd) {
	case RMNET_IOCTL_SET_LLP_ETHERNET:	/* Set Ethernet protocol */
		/* Perform Ethernet config only if in IP mode currently */
		if (test_bit(RMNET_MODE_LLP_IP, &eth_dev->flags)) {
			ether_setup(dev);
			dev->mtu = prev_mtu;
			dev->netdev_ops = &eth_netdev_ops;
			clear_bit(RMNET_MODE_LLP_IP, &eth_dev->flags);
			set_bit(RMNET_MODE_LLP_ETH, &eth_dev->flags);
			DBG(eth_dev, "[%s] ioctl(): set Ethernet proto mode\n",
					dev->name);
		}
		if (test_bit(RMNET_MODE_LLP_ETH, &eth_dev->flags))
			rc = 0;
		break;

	case RMNET_IOCTL_SET_LLP_IP:		/* Set RAWIP protocol */
		/* Perform IP config only if in Ethernet mode currently */
		if (test_bit(RMNET_MODE_LLP_ETH, &eth_dev->flags)) {
			/* Undo config done in ether_setup() */
			dev->header_ops = NULL;	/* No header */
			dev->type = ARPHRD_RAWIP;
			dev->hard_header_len = 0;
			dev->mtu = prev_mtu;
			dev->addr_len = 0;
			dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
			dev->netdev_ops = &eth_netdev_ops_ip;
			clear_bit(RMNET_MODE_LLP_ETH, &eth_dev->flags);
			set_bit(RMNET_MODE_LLP_IP, &eth_dev->flags);
			DBG(eth_dev, "[%s] ioctl(): set IP protocol mode\n",
					dev->name);
		}
		if (test_bit(RMNET_MODE_LLP_IP, &eth_dev->flags))
			rc = 0;
		break;

	case RMNET_IOCTL_GET_LLP:	/* Get link protocol state */
		state = eth_dev->flags & (RMNET_MODE_LLP_ETH
						| RMNET_MODE_LLP_IP);
		if (copy_to_user(addr, &state, sizeof(state)))
			break;
		rc = 0;
		break;

	case RMNET_IOCTL_SET_RX_HEADROOM:	/* Set RX headroom */
		if (copy_from_user(&eth_dev->rx_needed_headroom, addr,
				sizeof(eth_dev->rx_needed_headroom)))
			break;
		DBG(eth_dev, "[%s] ioctl(): set RX HEADROOM: %x\n",
				dev->name, eth_dev->rx_needed_headroom);
		rc = 0;
		break;

	case RMNET_IOCTL_EXTENDED:
		rc = rmnet_ioctl_extended(dev, ifr);
		break;

	default:
		pr_err("[%s] error: ioctl called for unsupported cmd[%d]\n",
			dev->name, cmd);
		rc = -EINVAL;
	}

	DBG(eth_dev, "[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08lx\n",
		dev->name, __func__, cmd, old_opmode, eth_dev->flags);

	return rc;
}

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @dev_addr: NULL, or "xx:xx:xx:xx:xx:xx" string for the device-side
 *	(self) link level address
 * @host_addr: NULL, or "xx:xx:xx:xx:xx:xx" string for the host-side
 *	link level address
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @qmult: queue length multiplier at high/super speed
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev *dev;
	struct net_device *net;
	int status;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_WORK(&dev->rx_work, process_rx_w);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	/* set operation mode to eth by default */
	set_bit(RMNET_MODE_LLP_ETH, &dev->flags);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);

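/**
 * gether_setup_name_default - initialize one ethernet-over-usb link
 * @netname: name for the network device (for example, "usb")
 * Context: may sleep
 *
 * Like gether_setup_name(), but the device is left unregistered and
 * random addresses are generated for both ends of the link; call
 * gether_set_gadget() and gether_register_netdev() once the gadget
 * is known.
 */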
struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device *net;
	struct eth_dev *dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_WORK(&dev->rx_work, process_rx_w);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);
	pr_warn("using random %s ethernet address\n", "self");
	eth_random_addr(dev->host_mac);
	pr_warn("using random %s ethernet address\n", "host");

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	/* set operation mode to eth by default */
	set_bit(RMNET_MODE_LLP_ETH, &dev->flags);

	SET_NETDEV_DEVTYPE(net, &gadget_type);

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

int gether_register_netdev(struct net_device *net)
{
	struct eth_dev *dev;
	struct usb_gadget *g;
	struct sockaddr sa;
	int status;

	if (!net->dev.parent)
		return -EINVAL;
	dev = netdev_priv(net);
	g = dev->gadget;
	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		return status;
	} else {
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}
	sa.sa_family = net->type;
	memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
	rtnl_lock();
	status = dev_set_mac_address(net, &sa);
	rtnl_unlock();
	if (status)
		pr_warn("cannot set self ethernet address: %d\n", status);
	else
		INFO(dev, "MAC %pM\n", dev->dev_mac);

	return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return get_ether_addr_str(dev->dev_mac, dev_addr, len);
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return get_ether_addr_str(dev->host_mac, host_addr, len);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	/* "%pm" prints the MAC as 12 hex digits with no separators, as
	 * needed for the CDC iMACAddress string (hence the len < 13 check)
	 */
	snprintf(host_addr, len, "%pm", dev->host_mac);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
	rtnl_lock();
	strlcpy(name, netdev_name(net), len);
	rtnl_unlock();
	return strlen(name);
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev *dev = link->ioport;
	int result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	if (link->in_ep) {
		link->in_ep->driver_data = dev;
		result = usb_ep_enable(link->in_ep);
		if (result != 0) {
			DBG(dev, "enable %s --> %d\n",
				link->in_ep->name, result);
			goto fail0;
		}
	}

	if (link->out_ep) {
		link->out_ep->driver_data = dev;
		result = usb_ep_enable(link->out_ep);
		if (result != 0) {
			DBG(dev, "enable %s --> %d\n",
				link->out_ep->name, result);
			goto fail1;
		}
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		dev->no_skb_reserve = link->no_skb_reserve;
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;
		dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
		dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;

		spin_lock(&dev->lock);
		dev->tx_skb_hold_count = 0;
		dev->no_tx_req_used = 0;
		dev->tx_req_bufsize = 0;
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		if (link->out_ep)
			(void) usb_ep_disable(link->out_ep);
fail1:
		if (link->in_ep)
			(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev *dev = link->ioport;
	struct usb_request *req;
	struct sk_buff *skb;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	if (link->in_ep) {
		usb_ep_disable(link->in_ep);
		spin_lock(&dev->req_lock);
		while (!list_empty(&dev->tx_reqs)) {
			req = container_of(dev->tx_reqs.next,
						struct usb_request, list);
			list_del(&req->list);

			spin_unlock(&dev->req_lock);
			if (link->multi_pkt_xfer) {
				kfree(req->buf);
				req->buf = NULL;
			}
			usb_ep_free_request(link->in_ep, req);
			spin_lock(&dev->req_lock);
		}
		spin_unlock(&dev->req_lock);
		link->in_ep->desc = NULL;
	}

	if (link->out_ep) {
		usb_ep_disable(link->out_ep);
		spin_lock(&dev->req_lock);
		while (!list_empty(&dev->rx_reqs)) {
			req = container_of(dev->rx_reqs.next,
						struct usb_request, list);
			list_del(&req->list);

			spin_unlock(&dev->req_lock);
			usb_ep_free_request(link->out_ep, req);
			spin_lock(&dev->req_lock);
		}
		spin_unlock(&dev->req_lock);

		spin_lock(&dev->rx_frames.lock);
		while ((skb = __skb_dequeue(&dev->rx_frames)))
			dev_kfree_skb_any(skb);
		spin_unlock(&dev->rx_frames.lock);

		link->out_ep->desc = NULL;
	}

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

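/* module init/exit just create and destroy the shared rx workqueue */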
static int __init gether_init(void)
{
	uether_wq = create_singlethread_workqueue("uether");
	if (!uether_wq) {
		pr_err("%s: Unable to create workqueue: uether\n", __func__);
		return -ENOMEM;
	}
	return 0;
}
module_init(gether_init);

static void __exit gether_exit(void)
{
	destroy_workqueue(uether_wq);
}
module_exit(gether_exit);

MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("ethernet over USB driver");
MODULE_LICENSE("GPL v2");