David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1 | /* |
| 2 | * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack |
| 3 | * |
| 4 | * Copyright (C) 2003-2005,2008 David Brownell |
| 5 | * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger |
| 6 | * Copyright (C) 2008 Nokia Corporation |
| 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License as published by |
| 10 | * the Free Software Foundation; either version 2 of the License, or |
| 11 | * (at your option) any later version. |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 12 | */ |
| 13 | |
| 14 | /* #define VERBOSE_DEBUG */ |
| 15 | |
| 16 | #include <linux/kernel.h> |
Sebastian Andrzej Siewior | 9830317 | 2012-09-10 16:30:50 +0200 | [diff] [blame] | 17 | #include <linux/module.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 18 | #include <linux/gfp.h> |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 19 | #include <linux/device.h> |
| 20 | #include <linux/ctype.h> |
| 21 | #include <linux/etherdevice.h> |
| 22 | #include <linux/ethtool.h> |
Ian Coolidge | 4fe5f07 | 2012-11-07 14:39:18 +0000 | [diff] [blame] | 23 | #include <linux/if_vlan.h> |
Hemant Kumar | fc49dbd | 2018-01-23 12:08:47 -0800 | [diff] [blame] | 24 | #include <linux/if_arp.h> |
| 25 | #include <linux/msm_rmnet.h> |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame^] | 26 | #include <linux/debugfs.h> |
| 27 | #include <linux/seq_file.h> |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 28 | |
| 29 | #include "u_ether.h" |
| 30 | |
| 31 | |
| 32 | /* |
| 33 | * This component encapsulates the Ethernet link glue needed to provide |
| 34 | * one (!) network link through the USB gadget stack, normally "usb0". |
| 35 | * |
| 36 | * The control and data models are handled by the function driver which |
Brian Niebuhr | 9b39e9d | 2009-08-14 10:04:22 -0500 | [diff] [blame] | 37 | * connects to this code; such as CDC Ethernet (ECM or EEM), |
| 38 | * "CDC Subset", or RNDIS. That includes all descriptor and endpoint |
| 39 | * management. |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 40 | * |
| 41 | * Link level addressing is handled by this component using module |
| 42 | * parameters; if no such parameters are provided, random link level |
| 43 | * addresses are used. Each end of the link uses one address. The |
| 44 | * host end address is exported in various ways, and is often recorded |
| 45 | * in configuration databases. |
| 46 | * |
| 47 | * The driver which assembles each configuration using such a link is |
| 48 | * responsible for ensuring that each configuration includes at most one |
| 49 | * instance of is network link. (The network layer provides ways for |
| 50 | * this single "physical" link to be used by multiple virtual links.) |
| 51 | */ |
| 52 | |
David Brownell | 8a1ce2c | 2008-08-18 17:43:56 -0700 | [diff] [blame] | 53 | #define UETH__VERSION "29-May-2008" |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 54 | |
Mike Looijmans | bba787a | 2015-08-05 08:54:55 +0200 | [diff] [blame] | 55 | /* Experiments show that both Linux and Windows hosts allow up to 16k |
| 56 | * frame sizes. Set the max size to 15k+52 to prevent allocating 32k |
| 57 | * blocks and still have efficient handling. */ |
| 58 | #define GETHER_MAX_ETH_FRAME_LEN 15412 |
| 59 | |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 60 | static struct workqueue_struct *uether_wq; |
| 61 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 62 | struct eth_dev { |
| 63 | /* lock is held while accessing port_usb |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 64 | */ |
| 65 | spinlock_t lock; |
| 66 | struct gether *port_usb; |
| 67 | |
| 68 | struct net_device *net; |
| 69 | struct usb_gadget *gadget; |
| 70 | |
| 71 | spinlock_t req_lock; /* guard {rx,tx}_reqs */ |
| 72 | struct list_head tx_reqs, rx_reqs; |
Badhri Jagan Sridharan | cb6a786 | 2014-09-18 10:42:41 -0700 | [diff] [blame] | 73 | unsigned tx_qlen; |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 74 | /* Minimum number of TX USB request queued to UDC */ |
| 75 | #define TX_REQ_THRESHOLD 5 |
| 76 | int no_tx_req_used; |
| 77 | int tx_skb_hold_count; |
| 78 | u32 tx_req_bufsize; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 79 | |
Brian Niebuhr | 9b39e9d | 2009-08-14 10:04:22 -0500 | [diff] [blame] | 80 | struct sk_buff_head rx_frames; |
| 81 | |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 82 | unsigned qmult; |
| 83 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 84 | unsigned header_len; |
xerox_lin | 87bebf8 | 2014-08-14 14:48:44 +0800 | [diff] [blame] | 85 | unsigned ul_max_pkts_per_xfer; |
xerox_lin | cdffcb8 | 2014-09-04 16:01:59 +0800 | [diff] [blame] | 86 | unsigned dl_max_pkts_per_xfer; |
Brian Niebuhr | 9b39e9d | 2009-08-14 10:04:22 -0500 | [diff] [blame] | 87 | struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb); |
| 88 | int (*unwrap)(struct gether *, |
| 89 | struct sk_buff *skb, |
| 90 | struct sk_buff_head *list); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 91 | |
| 92 | struct work_struct work; |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 93 | struct work_struct rx_work; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 94 | |
| 95 | unsigned long todo; |
Hemant Kumar | fc49dbd | 2018-01-23 12:08:47 -0800 | [diff] [blame] | 96 | unsigned long flags; |
| 97 | unsigned short rx_needed_headroom; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 98 | #define WORK_RX_MEMORY 0 |
| 99 | |
| 100 | bool zlp; |
Yoshihiro Shimoda | 05f6b0f | 2016-08-22 17:48:26 +0900 | [diff] [blame] | 101 | bool no_skb_reserve; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 102 | u8 host_mac[ETH_ALEN]; |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 103 | u8 dev_mac[ETH_ALEN]; |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame^] | 104 | unsigned long tx_throttle; |
| 105 | struct dentry *uether_dent; |
| 106 | struct dentry *uether_dfile; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 107 | }; |
| 108 | |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame^] | 109 | static void uether_debugfs_init(struct eth_dev *dev); |
| 110 | static void uether_debugfs_exit(struct eth_dev *dev); |
| 111 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 112 | /*-------------------------------------------------------------------------*/ |
| 113 | |
| 114 | #define RX_EXTRA 20 /* bytes guarding against rx overflows */ |
| 115 | |
| 116 | #define DEFAULT_QLEN 2 /* double buffering by default */ |
| 117 | |
Paul Zimmerman | 04617db | 2011-06-27 14:13:18 -0700 | [diff] [blame] | 118 | /* for dual-speed hardware, use deeper queues at high/super speed */ |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 119 | static inline int qlen(struct usb_gadget *gadget, unsigned qmult) |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 120 | { |
Paul Zimmerman | 04617db | 2011-06-27 14:13:18 -0700 | [diff] [blame] | 121 | if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH || |
| 122 | gadget->speed == USB_SPEED_SUPER)) |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 123 | return qmult * DEFAULT_QLEN; |
| 124 | else |
| 125 | return DEFAULT_QLEN; |
| 126 | } |
| 127 | |
| 128 | /*-------------------------------------------------------------------------*/ |
| 129 | |
| 130 | /* REVISIT there must be a better way than having two sets |
| 131 | * of debug calls ... |
| 132 | */ |
| 133 | |
| 134 | #undef DBG |
| 135 | #undef VDBG |
| 136 | #undef ERROR |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 137 | #undef INFO |
| 138 | |
| 139 | #define xprintk(d, level, fmt, args...) \ |
| 140 | printk(level "%s: " fmt , (d)->net->name , ## args) |
| 141 | |
| 142 | #ifdef DEBUG |
| 143 | #undef DEBUG |
| 144 | #define DBG(dev, fmt, args...) \ |
| 145 | xprintk(dev , KERN_DEBUG , fmt , ## args) |
| 146 | #else |
| 147 | #define DBG(dev, fmt, args...) \ |
| 148 | do { } while (0) |
| 149 | #endif /* DEBUG */ |
| 150 | |
| 151 | #ifdef VERBOSE_DEBUG |
| 152 | #define VDBG DBG |
| 153 | #else |
| 154 | #define VDBG(dev, fmt, args...) \ |
| 155 | do { } while (0) |
| 156 | #endif /* DEBUG */ |
| 157 | |
| 158 | #define ERROR(dev, fmt, args...) \ |
| 159 | xprintk(dev , KERN_ERR , fmt , ## args) |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 160 | #define INFO(dev, fmt, args...) \ |
| 161 | xprintk(dev , KERN_INFO , fmt , ## args) |
| 162 | |
| 163 | /*-------------------------------------------------------------------------*/ |
| 164 | |
| 165 | /* NETWORK DRIVER HOOKUP (to the layer above this driver) */ |
| 166 | |
Stephen Hemminger | ccad637 | 2008-11-19 22:42:31 -0800 | [diff] [blame] | 167 | static int ueth_change_mtu(struct net_device *net, int new_mtu) |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 168 | { |
Mike Looijmans | ab738ff | 2015-11-30 12:18:23 +0100 | [diff] [blame] | 169 | if (new_mtu <= ETH_HLEN || new_mtu > GETHER_MAX_ETH_FRAME_LEN) |
| 170 | return -ERANGE; |
| 171 | net->mtu = new_mtu; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 172 | |
Mike Looijmans | ab738ff | 2015-11-30 12:18:23 +0100 | [diff] [blame] | 173 | return 0; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 174 | } |
| 175 | |
Hemant Kumar | fc49dbd | 2018-01-23 12:08:47 -0800 | [diff] [blame] | 176 | static int ueth_change_mtu_ip(struct net_device *net, int new_mtu) |
| 177 | { |
| 178 | struct eth_dev *dev = netdev_priv(net); |
| 179 | unsigned long flags; |
| 180 | int status = 0; |
| 181 | |
| 182 | spin_lock_irqsave(&dev->lock, flags); |
| 183 | if (new_mtu <= 0) |
| 184 | status = -EINVAL; |
| 185 | else |
| 186 | net->mtu = new_mtu; |
| 187 | |
| 188 | DBG(dev, "[%s] MTU change: old=%d new=%d\n", net->name, |
| 189 | net->mtu, new_mtu); |
| 190 | spin_unlock_irqrestore(&dev->lock, flags); |
| 191 | |
| 192 | return status; |
| 193 | } |
| 194 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 195 | static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p) |
| 196 | { |
Jiri Pirko | 7826d43 | 2013-01-06 00:44:26 +0000 | [diff] [blame] | 197 | struct eth_dev *dev = netdev_priv(net); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 198 | |
Jiri Pirko | 7826d43 | 2013-01-06 00:44:26 +0000 | [diff] [blame] | 199 | strlcpy(p->driver, "g_ether", sizeof(p->driver)); |
| 200 | strlcpy(p->version, UETH__VERSION, sizeof(p->version)); |
| 201 | strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version)); |
| 202 | strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info)); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 203 | } |
| 204 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 205 | /* REVISIT can also support: |
| 206 | * - WOL (by tracking suspends and issuing remote wakeup) |
| 207 | * - msglevel (implies updated messaging) |
| 208 | * - ... probably more ethtool ops |
| 209 | */ |
| 210 | |
Stephen Hemminger | 0fc0b73 | 2009-09-02 01:03:33 -0700 | [diff] [blame] | 211 | static const struct ethtool_ops ops = { |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 212 | .get_drvinfo = eth_get_drvinfo, |
Jonathan McDowell | 237e75b | 2009-03-26 00:45:27 -0700 | [diff] [blame] | 213 | .get_link = ethtool_op_get_link, |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 214 | }; |
| 215 | |
| 216 | static void defer_kevent(struct eth_dev *dev, int flag) |
| 217 | { |
| 218 | if (test_and_set_bit(flag, &dev->todo)) |
| 219 | return; |
| 220 | if (!schedule_work(&dev->work)) |
| 221 | ERROR(dev, "kevent %d may have been dropped\n", flag); |
| 222 | else |
| 223 | DBG(dev, "kevent %d scheduled\n", flag); |
| 224 | } |
| 225 | |
| 226 | static void rx_complete(struct usb_ep *ep, struct usb_request *req); |
| 227 | |
| 228 | static int |
| 229 | rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) |
| 230 | { |
| 231 | struct sk_buff *skb; |
| 232 | int retval = -ENOMEM; |
| 233 | size_t size = 0; |
| 234 | struct usb_ep *out; |
| 235 | unsigned long flags; |
| 236 | |
| 237 | spin_lock_irqsave(&dev->lock, flags); |
| 238 | if (dev->port_usb) |
| 239 | out = dev->port_usb->out_ep; |
| 240 | else |
| 241 | out = NULL; |
| 242 | spin_unlock_irqrestore(&dev->lock, flags); |
| 243 | |
| 244 | if (!out) |
| 245 | return -ENOTCONN; |
| 246 | |
| 247 | |
| 248 | /* Padding up to RX_EXTRA handles minor disagreements with host. |
| 249 | * Normally we use the USB "terminate on short read" convention; |
| 250 | * so allow up to (N*maxpacket), since that memory is normally |
| 251 | * already allocated. Some hardware doesn't deal well with short |
| 252 | * reads (e.g. DMA must be N*maxpacket), so for now don't trim a |
| 253 | * byte off the end (to force hardware errors on overflow). |
| 254 | * |
| 255 | * RNDIS uses internal framing, and explicitly allows senders to |
| 256 | * pad to end-of-packet. That's potentially nice for speed, but |
| 257 | * means receivers can't recover lost synch on their own (because |
| 258 | * new packets don't only start after a short RX). |
| 259 | */ |
| 260 | size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA; |
| 261 | size += dev->port_usb->header_len; |
| 262 | size += out->maxpacket - 1; |
| 263 | size -= size % out->maxpacket; |
| 264 | |
xerox_lin | 87bebf8 | 2014-08-14 14:48:44 +0800 | [diff] [blame] | 265 | if (dev->ul_max_pkts_per_xfer) |
| 266 | size *= dev->ul_max_pkts_per_xfer; |
| 267 | |
Yauheni Kaliuta | 5c1168d | 2010-12-08 13:12:04 +0200 | [diff] [blame] | 268 | if (dev->port_usb->is_fixed) |
Stephen Hemminger | 45d1b7a | 2011-03-01 22:40:57 -0800 | [diff] [blame] | 269 | size = max_t(size_t, size, dev->port_usb->fixed_out_len); |
Yauheni Kaliuta | 5c1168d | 2010-12-08 13:12:04 +0200 | [diff] [blame] | 270 | |
Amit Pundir | 5ff0eb2 | 2016-01-08 19:36:02 +0530 | [diff] [blame] | 271 | DBG(dev, "%s: size: %zd\n", __func__, size); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 272 | skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags); |
| 273 | if (skb == NULL) { |
| 274 | DBG(dev, "no rx skb\n"); |
| 275 | goto enomem; |
| 276 | } |
| 277 | |
| 278 | /* Some platforms perform better when IP packets are aligned, |
| 279 | * but on at least one, checksumming fails otherwise. Note: |
| 280 | * RNDIS headers involve variable numbers of LE32 values. |
| 281 | */ |
Yoshihiro Shimoda | 05f6b0f | 2016-08-22 17:48:26 +0900 | [diff] [blame] | 282 | if (likely(!dev->no_skb_reserve)) |
| 283 | skb_reserve(skb, NET_IP_ALIGN); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 284 | |
| 285 | req->buf = skb->data; |
| 286 | req->length = size; |
| 287 | req->complete = rx_complete; |
| 288 | req->context = skb; |
| 289 | |
| 290 | retval = usb_ep_queue(out, req, gfp_flags); |
| 291 | if (retval == -ENOMEM) |
| 292 | enomem: |
| 293 | defer_kevent(dev, WORK_RX_MEMORY); |
| 294 | if (retval) { |
| 295 | DBG(dev, "rx submit --> %d\n", retval); |
| 296 | if (skb) |
| 297 | dev_kfree_skb_any(skb); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 298 | } |
| 299 | return retval; |
| 300 | } |
| 301 | |
| 302 | static void rx_complete(struct usb_ep *ep, struct usb_request *req) |
| 303 | { |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 304 | struct sk_buff *skb = req->context; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 305 | struct eth_dev *dev = ep->driver_data; |
| 306 | int status = req->status; |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 307 | bool queue = 0; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 308 | |
| 309 | switch (status) { |
| 310 | |
| 311 | /* normal completion */ |
| 312 | case 0: |
| 313 | skb_put(skb, req->actual); |
Brian Niebuhr | 9b39e9d | 2009-08-14 10:04:22 -0500 | [diff] [blame] | 314 | |
| 315 | if (dev->unwrap) { |
| 316 | unsigned long flags; |
| 317 | |
| 318 | spin_lock_irqsave(&dev->lock, flags); |
| 319 | if (dev->port_usb) { |
| 320 | status = dev->unwrap(dev->port_usb, |
| 321 | skb, |
| 322 | &dev->rx_frames); |
Badhri Jagan Sridharan | 8424b3e | 2014-09-18 10:48:48 -0700 | [diff] [blame] | 323 | if (status == -EINVAL) |
| 324 | dev->net->stats.rx_errors++; |
| 325 | else if (status == -EOVERFLOW) |
| 326 | dev->net->stats.rx_over_errors++; |
Brian Niebuhr | 9b39e9d | 2009-08-14 10:04:22 -0500 | [diff] [blame] | 327 | } else { |
| 328 | dev_kfree_skb_any(skb); |
| 329 | status = -ENOTCONN; |
| 330 | } |
| 331 | spin_unlock_irqrestore(&dev->lock, flags); |
| 332 | } else { |
| 333 | skb_queue_tail(&dev->rx_frames, skb); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 334 | } |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 335 | if (!status) |
| 336 | queue = 1; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 337 | break; |
| 338 | |
| 339 | /* software-driven interface shutdown */ |
| 340 | case -ECONNRESET: /* unlink */ |
| 341 | case -ESHUTDOWN: /* disconnect etc */ |
| 342 | VDBG(dev, "rx shutdown, code %d\n", status); |
| 343 | goto quiesce; |
| 344 | |
| 345 | /* for hardware automagic (such as pxa) */ |
| 346 | case -ECONNABORTED: /* endpoint reset */ |
| 347 | DBG(dev, "rx %s reset\n", ep->name); |
| 348 | defer_kevent(dev, WORK_RX_MEMORY); |
| 349 | quiesce: |
| 350 | dev_kfree_skb_any(skb); |
| 351 | goto clean; |
| 352 | |
| 353 | /* data overrun */ |
| 354 | case -EOVERFLOW: |
| 355 | dev->net->stats.rx_over_errors++; |
| 356 | /* FALLTHROUGH */ |
| 357 | |
| 358 | default: |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 359 | queue = 1; |
| 360 | dev_kfree_skb_any(skb); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 361 | dev->net->stats.rx_errors++; |
| 362 | DBG(dev, "rx status %d\n", status); |
| 363 | break; |
| 364 | } |
| 365 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 366 | clean: |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 367 | spin_lock(&dev->req_lock); |
| 368 | list_add(&req->list, &dev->rx_reqs); |
| 369 | spin_unlock(&dev->req_lock); |
| 370 | |
| 371 | if (queue) |
| 372 | queue_work(uether_wq, &dev->rx_work); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 373 | } |
| 374 | |
| 375 | static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n) |
| 376 | { |
| 377 | unsigned i; |
| 378 | struct usb_request *req; |
| 379 | |
| 380 | if (!n) |
| 381 | return -ENOMEM; |
| 382 | |
| 383 | /* queue/recycle up to N requests */ |
| 384 | i = n; |
| 385 | list_for_each_entry(req, list, list) { |
| 386 | if (i-- == 0) |
| 387 | goto extra; |
| 388 | } |
| 389 | while (i--) { |
| 390 | req = usb_ep_alloc_request(ep, GFP_ATOMIC); |
| 391 | if (!req) |
| 392 | return list_empty(list) ? -ENOMEM : 0; |
| 393 | list_add(&req->list, list); |
| 394 | } |
| 395 | return 0; |
| 396 | |
| 397 | extra: |
| 398 | /* free extras */ |
| 399 | for (;;) { |
| 400 | struct list_head *next; |
| 401 | |
| 402 | next = req->list.next; |
| 403 | list_del(&req->list); |
| 404 | usb_ep_free_request(ep, req); |
| 405 | |
| 406 | if (next == list) |
| 407 | break; |
| 408 | |
| 409 | req = container_of(next, struct usb_request, list); |
| 410 | } |
| 411 | return 0; |
| 412 | } |
| 413 | |
| 414 | static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n) |
| 415 | { |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 416 | int status = 0; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 417 | |
| 418 | spin_lock(&dev->req_lock); |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 419 | if (link->in_ep) { |
| 420 | status = prealloc(&dev->tx_reqs, link->in_ep, n); |
| 421 | if (status < 0) |
| 422 | goto fail; |
| 423 | } |
| 424 | |
| 425 | if (link->out_ep) { |
| 426 | status = prealloc(&dev->rx_reqs, link->out_ep, n); |
| 427 | if (status < 0) |
| 428 | goto fail; |
| 429 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 430 | goto done; |
| 431 | fail: |
| 432 | DBG(dev, "can't alloc requests\n"); |
| 433 | done: |
| 434 | spin_unlock(&dev->req_lock); |
| 435 | return status; |
| 436 | } |
| 437 | |
| 438 | static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags) |
| 439 | { |
| 440 | struct usb_request *req; |
| 441 | unsigned long flags; |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 442 | int req_cnt = 0; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 443 | |
| 444 | /* fill unused rxq slots with some skb */ |
| 445 | spin_lock_irqsave(&dev->req_lock, flags); |
| 446 | while (!list_empty(&dev->rx_reqs)) { |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 447 | /* break the nexus of continuous completion and re-submission*/ |
Praneeth Bajjuri | 6b4b51c | 2015-01-22 16:38:56 -0600 | [diff] [blame] | 448 | if (++req_cnt > qlen(dev->gadget, dev->qmult)) |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 449 | break; |
| 450 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 451 | req = container_of(dev->rx_reqs.next, |
| 452 | struct usb_request, list); |
| 453 | list_del_init(&req->list); |
| 454 | spin_unlock_irqrestore(&dev->req_lock, flags); |
| 455 | |
| 456 | if (rx_submit(dev, req, gfp_flags) < 0) { |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 457 | spin_lock_irqsave(&dev->req_lock, flags); |
| 458 | list_add(&req->list, &dev->rx_reqs); |
| 459 | spin_unlock_irqrestore(&dev->req_lock, flags); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 460 | defer_kevent(dev, WORK_RX_MEMORY); |
| 461 | return; |
| 462 | } |
| 463 | |
| 464 | spin_lock_irqsave(&dev->req_lock, flags); |
| 465 | } |
| 466 | spin_unlock_irqrestore(&dev->req_lock, flags); |
| 467 | } |
| 468 | |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 469 | static void process_rx_w(struct work_struct *work) |
| 470 | { |
| 471 | struct eth_dev *dev = container_of(work, struct eth_dev, rx_work); |
| 472 | struct sk_buff *skb; |
| 473 | int status = 0; |
| 474 | |
| 475 | if (!dev->port_usb) |
| 476 | return; |
| 477 | |
| 478 | while ((skb = skb_dequeue(&dev->rx_frames))) { |
| 479 | if (status < 0 |
| 480 | || ETH_HLEN > skb->len |
| 481 | || skb->len > ETH_FRAME_LEN) { |
| 482 | dev->net->stats.rx_errors++; |
| 483 | dev->net->stats.rx_length_errors++; |
| 484 | DBG(dev, "rx length %d\n", skb->len); |
| 485 | dev_kfree_skb_any(skb); |
| 486 | continue; |
| 487 | } |
| 488 | skb->protocol = eth_type_trans(skb, dev->net); |
| 489 | dev->net->stats.rx_packets++; |
| 490 | dev->net->stats.rx_bytes += skb->len; |
| 491 | |
| 492 | status = netif_rx_ni(skb); |
| 493 | } |
| 494 | |
| 495 | if (netif_running(dev->net)) |
| 496 | rx_fill(dev, GFP_KERNEL); |
| 497 | } |
| 498 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 499 | static void eth_work(struct work_struct *work) |
| 500 | { |
| 501 | struct eth_dev *dev = container_of(work, struct eth_dev, work); |
| 502 | |
| 503 | if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) { |
| 504 | if (netif_running(dev->net)) |
| 505 | rx_fill(dev, GFP_KERNEL); |
| 506 | } |
| 507 | |
| 508 | if (dev->todo) |
| 509 | DBG(dev, "work done, flags = 0x%lx\n", dev->todo); |
| 510 | } |
| 511 | |
| 512 | static void tx_complete(struct usb_ep *ep, struct usb_request *req) |
| 513 | { |
| 514 | struct sk_buff *skb = req->context; |
| 515 | struct eth_dev *dev = ep->driver_data; |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 516 | struct net_device *net = dev->net; |
| 517 | struct usb_request *new_req; |
| 518 | struct usb_ep *in; |
| 519 | int length; |
| 520 | int retval; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 521 | |
Rajkumar Raghupathy | d868245 | 2013-01-28 11:48:47 +0530 | [diff] [blame] | 522 | if (!dev->port_usb) { |
| 523 | usb_ep_free_request(ep, req); |
| 524 | return; |
| 525 | } |
| 526 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 527 | switch (req->status) { |
| 528 | default: |
| 529 | dev->net->stats.tx_errors++; |
| 530 | VDBG(dev, "tx err %d\n", req->status); |
| 531 | /* FALLTHROUGH */ |
| 532 | case -ECONNRESET: /* unlink */ |
| 533 | case -ESHUTDOWN: /* disconnect etc */ |
| 534 | break; |
| 535 | case 0: |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 536 | if (!req->zero) |
| 537 | dev->net->stats.tx_bytes += req->length-1; |
| 538 | else |
| 539 | dev->net->stats.tx_bytes += req->length; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 540 | } |
| 541 | dev->net->stats.tx_packets++; |
| 542 | |
| 543 | spin_lock(&dev->req_lock); |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 544 | list_add_tail(&req->list, &dev->tx_reqs); |
| 545 | |
| 546 | if (dev->port_usb->multi_pkt_xfer) { |
| 547 | dev->no_tx_req_used--; |
| 548 | req->length = 0; |
| 549 | in = dev->port_usb->in_ep; |
| 550 | |
| 551 | if (!list_empty(&dev->tx_reqs)) { |
| 552 | new_req = container_of(dev->tx_reqs.next, |
| 553 | struct usb_request, list); |
| 554 | list_del(&new_req->list); |
| 555 | spin_unlock(&dev->req_lock); |
| 556 | if (new_req->length > 0) { |
| 557 | length = new_req->length; |
| 558 | |
| 559 | /* NCM requires no zlp if transfer is |
| 560 | * dwNtbInMaxSize */ |
| 561 | if (dev->port_usb->is_fixed && |
| 562 | length == dev->port_usb->fixed_in_len && |
| 563 | (length % in->maxpacket) == 0) |
| 564 | new_req->zero = 0; |
| 565 | else |
| 566 | new_req->zero = 1; |
| 567 | |
| 568 | /* use zlp framing on tx for strict CDC-Ether |
| 569 | * conformance, though any robust network rx |
| 570 | * path ignores extra padding. and some hardware |
| 571 | * doesn't like to write zlps. |
| 572 | */ |
| 573 | if (new_req->zero && !dev->zlp && |
| 574 | (length % in->maxpacket) == 0) { |
| 575 | new_req->zero = 0; |
| 576 | length++; |
| 577 | } |
| 578 | |
| 579 | new_req->length = length; |
| 580 | retval = usb_ep_queue(in, new_req, GFP_ATOMIC); |
| 581 | switch (retval) { |
| 582 | default: |
| 583 | DBG(dev, "tx queue err %d\n", retval); |
Pavankumar Kondeti | b8a3362 | 2013-04-01 18:13:32 +0530 | [diff] [blame] | 584 | new_req->length = 0; |
| 585 | spin_lock(&dev->req_lock); |
| 586 | list_add_tail(&new_req->list, |
| 587 | &dev->tx_reqs); |
| 588 | spin_unlock(&dev->req_lock); |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 589 | break; |
| 590 | case 0: |
| 591 | spin_lock(&dev->req_lock); |
| 592 | dev->no_tx_req_used++; |
| 593 | spin_unlock(&dev->req_lock); |
Amit Pundir | 0917208 | 2016-05-30 15:19:21 +0530 | [diff] [blame] | 594 | netif_trans_update(net); |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 595 | } |
| 596 | } else { |
| 597 | spin_lock(&dev->req_lock); |
Pavankumar Kondeti | b8a3362 | 2013-04-01 18:13:32 +0530 | [diff] [blame] | 598 | /* |
| 599 | * Put the idle request at the back of the |
| 600 | * queue. The xmit function will put the |
| 601 | * unfinished request at the beginning of the |
| 602 | * queue. |
| 603 | */ |
| 604 | list_add_tail(&new_req->list, &dev->tx_reqs); |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 605 | spin_unlock(&dev->req_lock); |
| 606 | } |
| 607 | } else { |
| 608 | spin_unlock(&dev->req_lock); |
| 609 | } |
| 610 | } else { |
| 611 | spin_unlock(&dev->req_lock); |
| 612 | dev_kfree_skb_any(skb); |
| 613 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 614 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 615 | if (netif_carrier_ok(dev->net)) |
| 616 | netif_wake_queue(dev->net); |
| 617 | } |
| 618 | |
| 619 | static inline int is_promisc(u16 cdc_filter) |
| 620 | { |
| 621 | return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS; |
| 622 | } |
| 623 | |
Rajkumar Raghupathy | 7d4a6cb | 2013-05-23 11:37:41 +0530 | [diff] [blame] | 624 | static int alloc_tx_buffer(struct eth_dev *dev) |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 625 | { |
| 626 | struct list_head *act; |
| 627 | struct usb_request *req; |
| 628 | |
xerox_lin | cdffcb8 | 2014-09-04 16:01:59 +0800 | [diff] [blame] | 629 | dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer * |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 630 | (dev->net->mtu |
| 631 | + sizeof(struct ethhdr) |
| 632 | /* size of rndis_packet_msg_type */ |
| 633 | + 44 |
| 634 | + 22)); |
| 635 | |
| 636 | list_for_each(act, &dev->tx_reqs) { |
| 637 | req = container_of(act, struct usb_request, list); |
| 638 | if (!req->buf) |
| 639 | req->buf = kmalloc(dev->tx_req_bufsize, |
| 640 | GFP_ATOMIC); |
Rajkumar Raghupathy | 7d4a6cb | 2013-05-23 11:37:41 +0530 | [diff] [blame] | 641 | |
| 642 | if (!req->buf) |
| 643 | goto free_buf; |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 644 | } |
Rajkumar Raghupathy | 7d4a6cb | 2013-05-23 11:37:41 +0530 | [diff] [blame] | 645 | return 0; |
| 646 | |
| 647 | free_buf: |
| 648 | /* tx_req_bufsize = 0 retries mem alloc on next eth_start_xmit */ |
| 649 | dev->tx_req_bufsize = 0; |
| 650 | list_for_each(act, &dev->tx_reqs) { |
| 651 | req = container_of(act, struct usb_request, list); |
| 652 | kfree(req->buf); |
| 653 | req->buf = NULL; |
| 654 | } |
| 655 | return -ENOMEM; |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 656 | } |
| 657 | |
Stephen Hemminger | 25a79c4 | 2009-08-31 19:50:45 +0000 | [diff] [blame] | 658 | static netdev_tx_t eth_start_xmit(struct sk_buff *skb, |
| 659 | struct net_device *net) |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 660 | { |
| 661 | struct eth_dev *dev = netdev_priv(net); |
Jim Baxter | 6d3865f | 2014-07-07 18:33:18 +0100 | [diff] [blame] | 662 | int length = 0; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 663 | int retval; |
| 664 | struct usb_request *req = NULL; |
| 665 | unsigned long flags; |
| 666 | struct usb_ep *in; |
| 667 | u16 cdc_filter; |
Mayank Rana | 68f7474 | 2013-02-15 14:55:30 +0530 | [diff] [blame] | 668 | bool multi_pkt_xfer = false; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 669 | |
| 670 | spin_lock_irqsave(&dev->lock, flags); |
| 671 | if (dev->port_usb) { |
| 672 | in = dev->port_usb->in_ep; |
| 673 | cdc_filter = dev->port_usb->cdc_filter; |
Mayank Rana | 68f7474 | 2013-02-15 14:55:30 +0530 | [diff] [blame] | 674 | multi_pkt_xfer = dev->port_usb->multi_pkt_xfer; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 675 | } else { |
| 676 | in = NULL; |
| 677 | cdc_filter = 0; |
| 678 | } |
| 679 | spin_unlock_irqrestore(&dev->lock, flags); |
| 680 | |
Jim Baxter | 6d3865f | 2014-07-07 18:33:18 +0100 | [diff] [blame] | 681 | if (skb && !in) { |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 682 | dev_kfree_skb_any(skb); |
Patrick McHardy | 6ed1065 | 2009-06-23 06:03:08 +0000 | [diff] [blame] | 683 | return NETDEV_TX_OK; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 684 | } |
| 685 | |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 686 | /* Allocate memory for tx_reqs to support multi packet transfer */ |
Rajkumar Raghupathy | 7d4a6cb | 2013-05-23 11:37:41 +0530 | [diff] [blame] | 687 | if (multi_pkt_xfer && !dev->tx_req_bufsize) { |
| 688 | retval = alloc_tx_buffer(dev); |
| 689 | if (retval < 0) |
| 690 | return -ENOMEM; |
| 691 | } |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 692 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 693 | /* apply outgoing CDC or RNDIS filters */ |
Jim Baxter | 6d3865f | 2014-07-07 18:33:18 +0100 | [diff] [blame] | 694 | if (skb && !is_promisc(cdc_filter)) { |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 695 | u8 *dest = skb->data; |
| 696 | |
| 697 | if (is_multicast_ether_addr(dest)) { |
| 698 | u16 type; |
| 699 | |
| 700 | /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host |
| 701 | * SET_ETHERNET_MULTICAST_FILTERS requests |
| 702 | */ |
| 703 | if (is_broadcast_ether_addr(dest)) |
| 704 | type = USB_CDC_PACKET_TYPE_BROADCAST; |
| 705 | else |
| 706 | type = USB_CDC_PACKET_TYPE_ALL_MULTICAST; |
| 707 | if (!(cdc_filter & type)) { |
| 708 | dev_kfree_skb_any(skb); |
Patrick McHardy | 6ed1065 | 2009-06-23 06:03:08 +0000 | [diff] [blame] | 709 | return NETDEV_TX_OK; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 710 | } |
| 711 | } |
| 712 | /* ignores USB_CDC_PACKET_TYPE_DIRECTED */ |
| 713 | } |
| 714 | |
| 715 | spin_lock_irqsave(&dev->req_lock, flags); |
| 716 | /* |
| 717 | * this freelist can be empty if an interrupt triggered disconnect() |
| 718 | * and reconfigured the gadget (shutting down this queue) after the |
| 719 | * network stack decided to xmit but before we got the spinlock. |
| 720 | */ |
| 721 | if (list_empty(&dev->tx_reqs)) { |
| 722 | spin_unlock_irqrestore(&dev->req_lock, flags); |
Patrick McHardy | 5b54814 | 2009-06-12 06:22:29 +0000 | [diff] [blame] | 723 | return NETDEV_TX_BUSY; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 724 | } |
| 725 | |
| 726 | req = container_of(dev->tx_reqs.next, struct usb_request, list); |
| 727 | list_del(&req->list); |
| 728 | |
| 729 | /* temporarily stop TX queue when the freelist empties */ |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame^] | 730 | if (list_empty(&dev->tx_reqs)) { |
| 731 | /* |
| 732 | * tx_throttle gives info about number of times u_ether |
| 733 | * asked network layer to stop queueing packets to it |
| 734 | * when transmit resources are unavailable |
| 735 | */ |
| 736 | dev->tx_throttle++; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 737 | netif_stop_queue(net); |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame^] | 738 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 739 | spin_unlock_irqrestore(&dev->req_lock, flags); |
| 740 | |
| 741 | /* no buffer copies needed, unless the network stack did it |
| 742 | * or the hardware can't use skb buffers. |
| 743 | * or there's not enough space for extra headers we need |
| 744 | */ |
| 745 | if (dev->wrap) { |
Brian Niebuhr | 9b39e9d | 2009-08-14 10:04:22 -0500 | [diff] [blame] | 746 | unsigned long flags; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 747 | |
Brian Niebuhr | 9b39e9d | 2009-08-14 10:04:22 -0500 | [diff] [blame] | 748 | spin_lock_irqsave(&dev->lock, flags); |
| 749 | if (dev->port_usb) |
| 750 | skb = dev->wrap(dev->port_usb, skb); |
Greg Kroah-Hartman | 3a383cc | 2016-09-19 11:05:43 +0200 | [diff] [blame] | 751 | spin_unlock_irqrestore(&dev->lock, flags); |
Jim Baxter | 6d3865f | 2014-07-07 18:33:18 +0100 | [diff] [blame] | 752 | if (!skb) { |
| 753 | /* Multi frame CDC protocols may store the frame for |
| 754 | * later which is not a dropped frame. |
| 755 | */ |
Peter Chen | 88c09ea | 2016-07-01 15:33:29 +0800 | [diff] [blame] | 756 | if (dev->port_usb && |
Greg Kroah-Hartman | 3a383cc | 2016-09-19 11:05:43 +0200 | [diff] [blame] | 757 | dev->port_usb->supports_multi_frame) |
Jim Baxter | 6d3865f | 2014-07-07 18:33:18 +0100 | [diff] [blame] | 758 | goto multiframe; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 759 | goto drop; |
Jim Baxter | 6d3865f | 2014-07-07 18:33:18 +0100 | [diff] [blame] | 760 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 761 | } |
Jim Baxter | 6d3865f | 2014-07-07 18:33:18 +0100 | [diff] [blame] | 762 | |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 763 | spin_lock_irqsave(&dev->req_lock, flags); |
| 764 | dev->tx_skb_hold_count++; |
| 765 | spin_unlock_irqrestore(&dev->req_lock, flags); |
| 766 | |
Mayank Rana | 68f7474 | 2013-02-15 14:55:30 +0530 | [diff] [blame] | 767 | if (multi_pkt_xfer) { |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 768 | memcpy(req->buf + req->length, skb->data, skb->len); |
| 769 | req->length = req->length + skb->len; |
| 770 | length = req->length; |
| 771 | dev_kfree_skb_any(skb); |
| 772 | |
| 773 | spin_lock_irqsave(&dev->req_lock, flags); |
xerox_lin | cdffcb8 | 2014-09-04 16:01:59 +0800 | [diff] [blame] | 774 | if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) { |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 775 | if (dev->no_tx_req_used > TX_REQ_THRESHOLD) { |
| 776 | list_add(&req->list, &dev->tx_reqs); |
| 777 | spin_unlock_irqrestore(&dev->req_lock, flags); |
| 778 | goto success; |
| 779 | } |
| 780 | } |
| 781 | |
| 782 | dev->no_tx_req_used++; |
| 783 | spin_unlock_irqrestore(&dev->req_lock, flags); |
| 784 | |
| 785 | spin_lock_irqsave(&dev->lock, flags); |
| 786 | dev->tx_skb_hold_count = 0; |
| 787 | spin_unlock_irqrestore(&dev->lock, flags); |
| 788 | } else { |
| 789 | length = skb->len; |
| 790 | req->buf = skb->data; |
| 791 | req->context = skb; |
| 792 | } |
| 793 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 794 | req->complete = tx_complete; |
| 795 | |
Yauheni Kaliuta | 5c1168d | 2010-12-08 13:12:04 +0200 | [diff] [blame] | 796 | /* NCM requires no zlp if transfer is dwNtbInMaxSize */ |
Harish Jenny K N | 79775f4 | 2016-09-09 11:30:41 +0200 | [diff] [blame] | 797 | if (dev->port_usb && |
| 798 | dev->port_usb->is_fixed && |
Yauheni Kaliuta | 5c1168d | 2010-12-08 13:12:04 +0200 | [diff] [blame] | 799 | length == dev->port_usb->fixed_in_len && |
| 800 | (length % in->maxpacket) == 0) |
| 801 | req->zero = 0; |
| 802 | else |
| 803 | req->zero = 1; |
| 804 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 805 | /* use zlp framing on tx for strict CDC-Ether conformance, |
| 806 | * though any robust network rx path ignores extra padding. |
| 807 | * and some hardware doesn't like to write zlps. |
| 808 | */ |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 809 | if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) { |
| 810 | req->zero = 0; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 811 | length++; |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 812 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 813 | |
| 814 | req->length = length; |
| 815 | |
Badhri Jagan Sridharan | cb6a786 | 2014-09-18 10:42:41 -0700 | [diff] [blame] | 816 | /* throttle highspeed IRQ rate back slightly */ |
| 817 | if (gadget_is_dualspeed(dev->gadget) && |
| 818 | (dev->gadget->speed == USB_SPEED_HIGH)) { |
| 819 | dev->tx_qlen++; |
Praneeth Bajjuri | 6b4b51c | 2015-01-22 16:38:56 -0600 | [diff] [blame] | 820 | if (dev->tx_qlen == (dev->qmult/2)) { |
Badhri Jagan Sridharan | cb6a786 | 2014-09-18 10:42:41 -0700 | [diff] [blame] | 821 | req->no_interrupt = 0; |
| 822 | dev->tx_qlen = 0; |
| 823 | } else { |
| 824 | req->no_interrupt = 1; |
| 825 | } |
| 826 | } else { |
| 827 | req->no_interrupt = 0; |
| 828 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 829 | |
| 830 | retval = usb_ep_queue(in, req, GFP_ATOMIC); |
| 831 | switch (retval) { |
| 832 | default: |
| 833 | DBG(dev, "tx queue err %d\n", retval); |
| 834 | break; |
| 835 | case 0: |
Florian Westphal | 860e953 | 2016-05-03 16:33:13 +0200 | [diff] [blame] | 836 | netif_trans_update(net); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 837 | } |
| 838 | |
| 839 | if (retval) { |
Mayank Rana | 68f7474 | 2013-02-15 14:55:30 +0530 | [diff] [blame] | 840 | if (!multi_pkt_xfer) |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 841 | dev_kfree_skb_any(skb); |
Pavankumar Kondeti | b8a3362 | 2013-04-01 18:13:32 +0530 | [diff] [blame] | 842 | else |
| 843 | req->length = 0; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 844 | drop: |
| 845 | dev->net->stats.tx_dropped++; |
Jim Baxter | 6d3865f | 2014-07-07 18:33:18 +0100 | [diff] [blame] | 846 | multiframe: |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 847 | spin_lock_irqsave(&dev->req_lock, flags); |
| 848 | if (list_empty(&dev->tx_reqs)) |
| 849 | netif_start_queue(net); |
| 850 | list_add(&req->list, &dev->tx_reqs); |
| 851 | spin_unlock_irqrestore(&dev->req_lock, flags); |
| 852 | } |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 853 | success: |
Patrick McHardy | 6ed1065 | 2009-06-23 06:03:08 +0000 | [diff] [blame] | 854 | return NETDEV_TX_OK; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 855 | } |
| 856 | |
| 857 | /*-------------------------------------------------------------------------*/ |
| 858 | |
| 859 | static void eth_start(struct eth_dev *dev, gfp_t gfp_flags) |
| 860 | { |
| 861 | DBG(dev, "%s\n", __func__); |
| 862 | |
| 863 | /* fill the rx queue */ |
| 864 | rx_fill(dev, gfp_flags); |
| 865 | |
| 866 | /* and open the tx floodgates */ |
Badhri Jagan Sridharan | cb6a786 | 2014-09-18 10:42:41 -0700 | [diff] [blame] | 867 | dev->tx_qlen = 0; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 868 | netif_wake_queue(dev->net); |
| 869 | } |
| 870 | |
| 871 | static int eth_open(struct net_device *net) |
| 872 | { |
| 873 | struct eth_dev *dev = netdev_priv(net); |
| 874 | struct gether *link; |
| 875 | |
| 876 | DBG(dev, "%s\n", __func__); |
| 877 | if (netif_carrier_ok(dev->net)) |
| 878 | eth_start(dev, GFP_KERNEL); |
| 879 | |
| 880 | spin_lock_irq(&dev->lock); |
| 881 | link = dev->port_usb; |
| 882 | if (link && link->open) |
| 883 | link->open(link); |
| 884 | spin_unlock_irq(&dev->lock); |
| 885 | |
| 886 | return 0; |
| 887 | } |
| 888 | |
| 889 | static int eth_stop(struct net_device *net) |
| 890 | { |
| 891 | struct eth_dev *dev = netdev_priv(net); |
| 892 | unsigned long flags; |
| 893 | |
| 894 | VDBG(dev, "%s\n", __func__); |
| 895 | netif_stop_queue(net); |
| 896 | |
| 897 | DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n", |
| 898 | dev->net->stats.rx_packets, dev->net->stats.tx_packets, |
| 899 | dev->net->stats.rx_errors, dev->net->stats.tx_errors |
| 900 | ); |
| 901 | |
| 902 | /* ensure there are no more active requests */ |
| 903 | spin_lock_irqsave(&dev->lock, flags); |
| 904 | if (dev->port_usb) { |
| 905 | struct gether *link = dev->port_usb; |
Michael Grzeschik | b1b552a | 2012-08-08 11:48:10 +0200 | [diff] [blame] | 906 | const struct usb_endpoint_descriptor *in; |
| 907 | const struct usb_endpoint_descriptor *out; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 908 | |
| 909 | if (link->close) |
| 910 | link->close(link); |
| 911 | |
| 912 | /* NOTE: we have no abort-queue primitive we could use |
| 913 | * to cancel all pending I/O. Instead, we disable then |
| 914 | * reenable the endpoints ... this idiom may leave toggle |
| 915 | * wrong, but that's a self-correcting error. |
| 916 | * |
| 917 | * REVISIT: we *COULD* just let the transfers complete at |
| 918 | * their own pace; the network stack can handle old packets. |
| 919 | * For the moment we leave this here, since it works. |
| 920 | */ |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 921 | if (link->in_ep) { |
| 922 | in = link->in_ep->desc; |
| 923 | usb_ep_disable(link->in_ep); |
| 924 | if (netif_carrier_ok(net)) { |
| 925 | DBG(dev, "host still using in endpoints\n"); |
| 926 | link->in_ep->desc = in; |
| 927 | usb_ep_enable(link->in_ep); |
| 928 | } |
| 929 | } |
| 930 | |
| 931 | if (link->out_ep) { |
| 932 | out = link->out_ep->desc; |
| 933 | usb_ep_disable(link->out_ep); |
| 934 | if (netif_carrier_ok(net)) { |
| 935 | DBG(dev, "host still using out endpoints\n"); |
| 936 | link->out_ep->desc = out; |
| 937 | usb_ep_enable(link->out_ep); |
| 938 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 939 | } |
| 940 | } |
| 941 | spin_unlock_irqrestore(&dev->lock, flags); |
| 942 | |
| 943 | return 0; |
| 944 | } |
| 945 | |
| 946 | /*-------------------------------------------------------------------------*/ |
| 947 | |
Michal Nazarewicz | 28824b1 | 2010-05-05 12:53:13 +0200 | [diff] [blame] | 948 | static int get_ether_addr(const char *str, u8 *dev_addr) |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 949 | { |
| 950 | if (str) { |
| 951 | unsigned i; |
| 952 | |
| 953 | for (i = 0; i < 6; i++) { |
| 954 | unsigned char num; |
| 955 | |
| 956 | if ((*str == '.') || (*str == ':')) |
| 957 | str++; |
Andy Shevchenko | e644814 | 2010-06-15 17:04:44 +0300 | [diff] [blame] | 958 | num = hex_to_bin(*str++) << 4; |
| 959 | num |= hex_to_bin(*str++); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 960 | dev_addr [i] = num; |
| 961 | } |
| 962 | if (is_valid_ether_addr(dev_addr)) |
| 963 | return 0; |
| 964 | } |
Joe Perches | 006c913 | 2012-07-12 22:33:11 -0700 | [diff] [blame] | 965 | eth_random_addr(dev_addr); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 966 | return 1; |
| 967 | } |
| 968 | |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 969 | static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len) |
| 970 | { |
| 971 | if (len < 18) |
| 972 | return -EINVAL; |
| 973 | |
Andy Shevchenko | 27f3870 | 2015-01-15 13:40:04 +0200 | [diff] [blame] | 974 | snprintf(str, len, "%pM", dev_addr); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 975 | return 18; |
| 976 | } |
| 977 | |
Hemant Kumar | fc49dbd | 2018-01-23 12:08:47 -0800 | [diff] [blame] | 978 | static int ether_ioctl(struct net_device *, struct ifreq *, int); |
| 979 | |
Stephen Hemminger | 5ec38f3 | 2009-01-07 18:05:39 -0800 | [diff] [blame] | 980 | static const struct net_device_ops eth_netdev_ops = { |
| 981 | .ndo_open = eth_open, |
| 982 | .ndo_stop = eth_stop, |
| 983 | .ndo_start_xmit = eth_start_xmit, |
Hemant Kumar | fc49dbd | 2018-01-23 12:08:47 -0800 | [diff] [blame] | 984 | .ndo_do_ioctl = ether_ioctl, |
Stephen Hemminger | 5ec38f3 | 2009-01-07 18:05:39 -0800 | [diff] [blame] | 985 | .ndo_change_mtu = ueth_change_mtu, |
| 986 | .ndo_set_mac_address = eth_mac_addr, |
| 987 | .ndo_validate_addr = eth_validate_addr, |
| 988 | }; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 989 | |
Hemant Kumar | fc49dbd | 2018-01-23 12:08:47 -0800 | [diff] [blame] | 990 | static const struct net_device_ops eth_netdev_ops_ip = { |
| 991 | .ndo_open = eth_open, |
| 992 | .ndo_stop = eth_stop, |
| 993 | .ndo_start_xmit = eth_start_xmit, |
| 994 | .ndo_do_ioctl = ether_ioctl, |
| 995 | .ndo_change_mtu = ueth_change_mtu_ip, |
| 996 | .ndo_set_mac_address = NULL, |
| 997 | .ndo_validate_addr = NULL, |
| 998 | }; |
| 999 | |
| 1000 | static int rmnet_ioctl_extended(struct net_device *dev, struct ifreq *ifr) |
| 1001 | { |
| 1002 | struct rmnet_ioctl_extended_s ext_cmd; |
| 1003 | struct eth_dev *eth_dev = netdev_priv(dev); |
| 1004 | int rc = 0; |
| 1005 | |
| 1006 | rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data, |
| 1007 | sizeof(struct rmnet_ioctl_extended_s)); |
| 1008 | |
| 1009 | if (rc) { |
| 1010 | DBG(eth_dev, "%s(): copy_from_user() failed\n", __func__); |
| 1011 | return rc; |
| 1012 | } |
| 1013 | |
| 1014 | switch (ext_cmd.extended_ioctl) { |
| 1015 | case RMNET_IOCTL_GET_SUPPORTED_FEATURES: |
| 1016 | ext_cmd.u.data = 0; |
| 1017 | break; |
| 1018 | |
| 1019 | case RMNET_IOCTL_SET_MRU: |
| 1020 | if (netif_running(dev)) |
| 1021 | return -EBUSY; |
| 1022 | |
| 1023 | /* 16K max */ |
| 1024 | if ((size_t)ext_cmd.u.data > 0x4000) |
| 1025 | return -EINVAL; |
| 1026 | |
| 1027 | if (eth_dev->port_usb) { |
| 1028 | eth_dev->port_usb->is_fixed = true; |
| 1029 | eth_dev->port_usb->fixed_out_len = |
| 1030 | (size_t) ext_cmd.u.data; |
| 1031 | DBG(eth_dev, "[%s] rmnet_ioctl(): SET MRU to %u\n", |
| 1032 | dev->name, eth_dev->port_usb->fixed_out_len); |
| 1033 | } else { |
| 1034 | pr_err("[%s]: %s: SET MRU failed. Cable disconnected\n", |
| 1035 | dev->name, __func__); |
| 1036 | return -ENODEV; |
| 1037 | } |
| 1038 | break; |
| 1039 | |
| 1040 | case RMNET_IOCTL_GET_MRU: |
| 1041 | if (eth_dev->port_usb) { |
| 1042 | ext_cmd.u.data = eth_dev->port_usb->is_fixed ? |
| 1043 | eth_dev->port_usb->fixed_out_len : |
| 1044 | dev->mtu; |
| 1045 | } else { |
| 1046 | pr_err("[%s]: %s: GET MRU failed. Cable disconnected\n", |
| 1047 | dev->name, __func__); |
| 1048 | return -ENODEV; |
| 1049 | } |
| 1050 | break; |
| 1051 | |
| 1052 | case RMNET_IOCTL_GET_DRIVER_NAME: |
| 1053 | strlcpy(ext_cmd.u.if_name, dev->name, |
| 1054 | sizeof(ext_cmd.u.if_name)); |
| 1055 | break; |
| 1056 | |
| 1057 | default: |
| 1058 | break; |
| 1059 | } |
| 1060 | |
| 1061 | rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd, |
| 1062 | sizeof(struct rmnet_ioctl_extended_s)); |
| 1063 | |
| 1064 | if (rc) |
| 1065 | DBG(eth_dev, "%s(): copy_to_user() failed\n", __func__); |
| 1066 | return rc; |
| 1067 | } |
| 1068 | |
| 1069 | static int ether_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
| 1070 | { |
| 1071 | struct eth_dev *eth_dev = netdev_priv(dev); |
| 1072 | void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; |
| 1073 | int prev_mtu = dev->mtu; |
| 1074 | u32 state, old_opmode; |
| 1075 | int rc = -EFAULT; |
| 1076 | |
| 1077 | old_opmode = eth_dev->flags; |
| 1078 | /* Process IOCTL command */ |
| 1079 | switch (cmd) { |
| 1080 | case RMNET_IOCTL_SET_LLP_ETHERNET: /*Set Ethernet protocol*/ |
| 1081 | /* Perform Ethernet config only if in IP mode currently*/ |
| 1082 | if (test_bit(RMNET_MODE_LLP_IP, ð_dev->flags)) { |
| 1083 | ether_setup(dev); |
| 1084 | dev->mtu = prev_mtu; |
| 1085 | dev->netdev_ops = ð_netdev_ops; |
| 1086 | clear_bit(RMNET_MODE_LLP_IP, ð_dev->flags); |
| 1087 | set_bit(RMNET_MODE_LLP_ETH, ð_dev->flags); |
| 1088 | DBG(eth_dev, "[%s] ioctl(): set Ethernet proto mode\n", |
| 1089 | dev->name); |
| 1090 | } |
| 1091 | if (test_bit(RMNET_MODE_LLP_ETH, ð_dev->flags)) |
| 1092 | rc = 0; |
| 1093 | break; |
| 1094 | |
| 1095 | case RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP protocol*/ |
| 1096 | /* Perform IP config only if in Ethernet mode currently*/ |
| 1097 | if (test_bit(RMNET_MODE_LLP_ETH, ð_dev->flags)) { |
| 1098 | /* Undo config done in ether_setup() */ |
| 1099 | dev->header_ops = NULL; /* No header */ |
| 1100 | dev->type = ARPHRD_RAWIP; |
| 1101 | dev->hard_header_len = 0; |
| 1102 | dev->mtu = prev_mtu; |
| 1103 | dev->addr_len = 0; |
| 1104 | dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); |
| 1105 | dev->netdev_ops = ð_netdev_ops_ip; |
| 1106 | clear_bit(RMNET_MODE_LLP_ETH, ð_dev->flags); |
| 1107 | set_bit(RMNET_MODE_LLP_IP, ð_dev->flags); |
| 1108 | DBG(eth_dev, "[%s] ioctl(): set IP protocol mode\n", |
| 1109 | dev->name); |
| 1110 | } |
| 1111 | if (test_bit(RMNET_MODE_LLP_IP, ð_dev->flags)) |
| 1112 | rc = 0; |
| 1113 | break; |
| 1114 | |
| 1115 | case RMNET_IOCTL_GET_LLP: /* Get link protocol state */ |
| 1116 | state = eth_dev->flags & (RMNET_MODE_LLP_ETH |
| 1117 | | RMNET_MODE_LLP_IP); |
| 1118 | if (copy_to_user(addr, &state, sizeof(state))) |
| 1119 | break; |
| 1120 | rc = 0; |
| 1121 | break; |
| 1122 | |
| 1123 | case RMNET_IOCTL_SET_RX_HEADROOM: /* Set RX headroom */ |
| 1124 | if (copy_from_user(ð_dev->rx_needed_headroom, addr, |
| 1125 | sizeof(eth_dev->rx_needed_headroom))) |
| 1126 | break; |
| 1127 | DBG(eth_dev, "[%s] ioctl(): set RX HEADROOM: %x\n", |
| 1128 | dev->name, eth_dev->rx_needed_headroom); |
| 1129 | rc = 0; |
| 1130 | break; |
| 1131 | |
| 1132 | case RMNET_IOCTL_EXTENDED: |
| 1133 | rc = rmnet_ioctl_extended(dev, ifr); |
| 1134 | break; |
| 1135 | |
| 1136 | default: |
| 1137 | pr_err("[%s] error: ioctl called for unsupported cmd[%d]", |
| 1138 | dev->name, cmd); |
| 1139 | rc = -EINVAL; |
| 1140 | } |
| 1141 | |
| 1142 | DBG(eth_dev, "[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08lx\n", |
| 1143 | dev->name, __func__, cmd, old_opmode, eth_dev->flags); |
| 1144 | |
| 1145 | return rc; |
| 1146 | } |
| 1147 | |
Marcel Holtmann | aa79074 | 2010-01-15 22:13:58 -0800 | [diff] [blame] | 1148 | static struct device_type gadget_type = { |
| 1149 | .name = "gadget", |
| 1150 | }; |
| 1151 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1152 | /** |
Mike Lockwood | 036e98b | 2012-05-10 10:08:02 +0200 | [diff] [blame] | 1153 | * gether_setup_name - initialize one ethernet-over-usb link |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1154 | * @g: gadget to associated with these links |
| 1155 | * @ethaddr: NULL, or a buffer in which the ethernet address of the |
| 1156 | * host side of the link is recorded |
Mike Lockwood | 036e98b | 2012-05-10 10:08:02 +0200 | [diff] [blame] | 1157 | * @netname: name for network device (for example, "usb") |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1158 | * Context: may sleep |
| 1159 | * |
| 1160 | * This sets up the single network link that may be exported by a |
| 1161 | * gadget driver using this framework. The link layer addresses are |
| 1162 | * set up using module parameters. |
| 1163 | * |
Dan Carpenter | 574f24f | 2013-11-14 11:42:11 +0300 | [diff] [blame] | 1164 | * Returns an eth_dev pointer on success, or an ERR_PTR on failure. |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1165 | */ |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 1166 | struct eth_dev *gether_setup_name(struct usb_gadget *g, |
| 1167 | const char *dev_addr, const char *host_addr, |
| 1168 | u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname) |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1169 | { |
| 1170 | struct eth_dev *dev; |
| 1171 | struct net_device *net; |
| 1172 | int status; |
| 1173 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1174 | net = alloc_etherdev(sizeof *dev); |
| 1175 | if (!net) |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1176 | return ERR_PTR(-ENOMEM); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1177 | |
| 1178 | dev = netdev_priv(net); |
| 1179 | spin_lock_init(&dev->lock); |
| 1180 | spin_lock_init(&dev->req_lock); |
| 1181 | INIT_WORK(&dev->work, eth_work); |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 1182 | INIT_WORK(&dev->rx_work, process_rx_w); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1183 | INIT_LIST_HEAD(&dev->tx_reqs); |
| 1184 | INIT_LIST_HEAD(&dev->rx_reqs); |
| 1185 | |
Brian Niebuhr | 9b39e9d | 2009-08-14 10:04:22 -0500 | [diff] [blame] | 1186 | skb_queue_head_init(&dev->rx_frames); |
| 1187 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1188 | /* network device setup */ |
| 1189 | dev->net = net; |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 1190 | dev->qmult = qmult; |
Mike Lockwood | 036e98b | 2012-05-10 10:08:02 +0200 | [diff] [blame] | 1191 | snprintf(net->name, sizeof(net->name), "%s%%d", netname); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1192 | |
| 1193 | if (get_ether_addr(dev_addr, net->dev_addr)) |
| 1194 | dev_warn(&g->dev, |
| 1195 | "using random %s ethernet address\n", "self"); |
| 1196 | if (get_ether_addr(host_addr, dev->host_mac)) |
| 1197 | dev_warn(&g->dev, |
| 1198 | "using random %s ethernet address\n", "host"); |
| 1199 | |
| 1200 | if (ethaddr) |
| 1201 | memcpy(ethaddr, dev->host_mac, ETH_ALEN); |
| 1202 | |
Stephen Hemminger | 5ec38f3 | 2009-01-07 18:05:39 -0800 | [diff] [blame] | 1203 | net->netdev_ops = ð_netdev_ops; |
| 1204 | |
Wilfried Klaebe | 7ad24ea | 2014-05-11 00:12:32 +0000 | [diff] [blame] | 1205 | net->ethtool_ops = &ops; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1206 | |
Hemant Kumar | fc49dbd | 2018-01-23 12:08:47 -0800 | [diff] [blame] | 1207 | /* set operation mode to eth by default */ |
| 1208 | set_bit(RMNET_MODE_LLP_ETH, &dev->flags); |
| 1209 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1210 | dev->gadget = g; |
| 1211 | SET_NETDEV_DEV(net, &g->dev); |
Marcel Holtmann | aa79074 | 2010-01-15 22:13:58 -0800 | [diff] [blame] | 1212 | SET_NETDEV_DEVTYPE(net, &gadget_type); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1213 | |
| 1214 | status = register_netdev(net); |
| 1215 | if (status < 0) { |
| 1216 | dev_dbg(&g->dev, "register_netdev failed, %d\n", status); |
| 1217 | free_netdev(net); |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1218 | dev = ERR_PTR(status); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1219 | } else { |
Johannes Berg | e174961 | 2008-10-27 15:59:26 -0700 | [diff] [blame] | 1220 | INFO(dev, "MAC %pM\n", net->dev_addr); |
| 1221 | INFO(dev, "HOST MAC %pM\n", dev->host_mac); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1222 | |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1223 | /* |
| 1224 | * two kinds of host-initiated state changes: |
Kevin Cernekee | 31bde1c | 2012-06-24 21:11:22 -0700 | [diff] [blame] | 1225 | * - iff DATA transfer is active, carrier is "on" |
| 1226 | * - tx queueing enabled if open *and* carrier is "on" |
| 1227 | */ |
| 1228 | netif_carrier_off(net); |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame^] | 1229 | uether_debugfs_init(dev); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1230 | } |
| 1231 | |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1232 | return dev; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1233 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1234 | EXPORT_SYMBOL_GPL(gether_setup_name); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1235 | |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1236 | struct net_device *gether_setup_name_default(const char *netname) |
| 1237 | { |
| 1238 | struct net_device *net; |
| 1239 | struct eth_dev *dev; |
| 1240 | |
| 1241 | net = alloc_etherdev(sizeof(*dev)); |
| 1242 | if (!net) |
| 1243 | return ERR_PTR(-ENOMEM); |
| 1244 | |
| 1245 | dev = netdev_priv(net); |
| 1246 | spin_lock_init(&dev->lock); |
| 1247 | spin_lock_init(&dev->req_lock); |
| 1248 | INIT_WORK(&dev->work, eth_work); |
Matthew Moeller | 5df3222 | 2016-03-09 20:19:25 -0600 | [diff] [blame] | 1249 | INIT_WORK(&dev->rx_work, process_rx_w); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1250 | INIT_LIST_HEAD(&dev->tx_reqs); |
| 1251 | INIT_LIST_HEAD(&dev->rx_reqs); |
| 1252 | |
| 1253 | skb_queue_head_init(&dev->rx_frames); |
| 1254 | |
| 1255 | /* network device setup */ |
| 1256 | dev->net = net; |
| 1257 | dev->qmult = QMULT_DEFAULT; |
| 1258 | snprintf(net->name, sizeof(net->name), "%s%%d", netname); |
| 1259 | |
| 1260 | eth_random_addr(dev->dev_mac); |
| 1261 | pr_warn("using random %s ethernet address\n", "self"); |
| 1262 | eth_random_addr(dev->host_mac); |
| 1263 | pr_warn("using random %s ethernet address\n", "host"); |
| 1264 | |
| 1265 | net->netdev_ops = ð_netdev_ops; |
| 1266 | |
Wilfried Klaebe | 7ad24ea | 2014-05-11 00:12:32 +0000 | [diff] [blame] | 1267 | net->ethtool_ops = &ops; |
Hemant Kumar | fc49dbd | 2018-01-23 12:08:47 -0800 | [diff] [blame] | 1268 | |
| 1269 | /* set operation mode to eth by default */ |
| 1270 | set_bit(RMNET_MODE_LLP_ETH, &dev->flags); |
| 1271 | |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1272 | SET_NETDEV_DEVTYPE(net, &gadget_type); |
| 1273 | |
| 1274 | return net; |
| 1275 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1276 | EXPORT_SYMBOL_GPL(gether_setup_name_default); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1277 | |
| 1278 | int gether_register_netdev(struct net_device *net) |
| 1279 | { |
| 1280 | struct eth_dev *dev; |
| 1281 | struct usb_gadget *g; |
| 1282 | struct sockaddr sa; |
| 1283 | int status; |
| 1284 | |
| 1285 | if (!net->dev.parent) |
| 1286 | return -EINVAL; |
| 1287 | dev = netdev_priv(net); |
| 1288 | g = dev->gadget; |
| 1289 | status = register_netdev(net); |
| 1290 | if (status < 0) { |
| 1291 | dev_dbg(&g->dev, "register_netdev failed, %d\n", status); |
| 1292 | return status; |
| 1293 | } else { |
| 1294 | INFO(dev, "HOST MAC %pM\n", dev->host_mac); |
| 1295 | |
| 1296 | /* two kinds of host-initiated state changes: |
| 1297 | * - iff DATA transfer is active, carrier is "on" |
| 1298 | * - tx queueing enabled if open *and* carrier is "on" |
| 1299 | */ |
| 1300 | netif_carrier_off(net); |
| 1301 | } |
| 1302 | sa.sa_family = net->type; |
| 1303 | memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN); |
| 1304 | rtnl_lock(); |
| 1305 | status = dev_set_mac_address(net, &sa); |
| 1306 | rtnl_unlock(); |
| 1307 | if (status) |
| 1308 | pr_warn("cannot set self ethernet address: %d\n", status); |
| 1309 | else |
| 1310 | INFO(dev, "MAC %pM\n", dev->dev_mac); |
| 1311 | |
| 1312 | return status; |
| 1313 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1314 | EXPORT_SYMBOL_GPL(gether_register_netdev); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1315 | |
| 1316 | void gether_set_gadget(struct net_device *net, struct usb_gadget *g) |
| 1317 | { |
| 1318 | struct eth_dev *dev; |
| 1319 | |
| 1320 | dev = netdev_priv(net); |
| 1321 | dev->gadget = g; |
| 1322 | SET_NETDEV_DEV(net, &g->dev); |
| 1323 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1324 | EXPORT_SYMBOL_GPL(gether_set_gadget); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1325 | |
| 1326 | int gether_set_dev_addr(struct net_device *net, const char *dev_addr) |
| 1327 | { |
| 1328 | struct eth_dev *dev; |
| 1329 | u8 new_addr[ETH_ALEN]; |
| 1330 | |
| 1331 | dev = netdev_priv(net); |
| 1332 | if (get_ether_addr(dev_addr, new_addr)) |
| 1333 | return -EINVAL; |
| 1334 | memcpy(dev->dev_mac, new_addr, ETH_ALEN); |
| 1335 | return 0; |
| 1336 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1337 | EXPORT_SYMBOL_GPL(gether_set_dev_addr); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1338 | |
| 1339 | int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len) |
| 1340 | { |
| 1341 | struct eth_dev *dev; |
| 1342 | |
| 1343 | dev = netdev_priv(net); |
| 1344 | return get_ether_addr_str(dev->dev_mac, dev_addr, len); |
| 1345 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1346 | EXPORT_SYMBOL_GPL(gether_get_dev_addr); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1347 | |
| 1348 | int gether_set_host_addr(struct net_device *net, const char *host_addr) |
| 1349 | { |
| 1350 | struct eth_dev *dev; |
| 1351 | u8 new_addr[ETH_ALEN]; |
| 1352 | |
| 1353 | dev = netdev_priv(net); |
| 1354 | if (get_ether_addr(host_addr, new_addr)) |
| 1355 | return -EINVAL; |
| 1356 | memcpy(dev->host_mac, new_addr, ETH_ALEN); |
| 1357 | return 0; |
| 1358 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1359 | EXPORT_SYMBOL_GPL(gether_set_host_addr); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1360 | |
| 1361 | int gether_get_host_addr(struct net_device *net, char *host_addr, int len) |
| 1362 | { |
| 1363 | struct eth_dev *dev; |
| 1364 | |
| 1365 | dev = netdev_priv(net); |
| 1366 | return get_ether_addr_str(dev->host_mac, host_addr, len); |
| 1367 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1368 | EXPORT_SYMBOL_GPL(gether_get_host_addr); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1369 | |
| 1370 | int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len) |
| 1371 | { |
| 1372 | struct eth_dev *dev; |
| 1373 | |
| 1374 | if (len < 13) |
| 1375 | return -EINVAL; |
| 1376 | |
| 1377 | dev = netdev_priv(net); |
| 1378 | snprintf(host_addr, len, "%pm", dev->host_mac); |
| 1379 | |
| 1380 | return strlen(host_addr); |
| 1381 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1382 | EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1383 | |
Andrzej Pietrasiewicz | bf4277c | 2013-05-28 09:15:45 +0200 | [diff] [blame] | 1384 | void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN]) |
| 1385 | { |
| 1386 | struct eth_dev *dev; |
| 1387 | |
| 1388 | dev = netdev_priv(net); |
| 1389 | memcpy(host_mac, dev->host_mac, ETH_ALEN); |
| 1390 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1391 | EXPORT_SYMBOL_GPL(gether_get_host_addr_u8); |
Andrzej Pietrasiewicz | bf4277c | 2013-05-28 09:15:45 +0200 | [diff] [blame] | 1392 | |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1393 | void gether_set_qmult(struct net_device *net, unsigned qmult) |
| 1394 | { |
| 1395 | struct eth_dev *dev; |
| 1396 | |
| 1397 | dev = netdev_priv(net); |
| 1398 | dev->qmult = qmult; |
| 1399 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1400 | EXPORT_SYMBOL_GPL(gether_set_qmult); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1401 | |
| 1402 | unsigned gether_get_qmult(struct net_device *net) |
| 1403 | { |
| 1404 | struct eth_dev *dev; |
| 1405 | |
| 1406 | dev = netdev_priv(net); |
| 1407 | return dev->qmult; |
| 1408 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1409 | EXPORT_SYMBOL_GPL(gether_get_qmult); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1410 | |
| 1411 | int gether_get_ifname(struct net_device *net, char *name, int len) |
| 1412 | { |
| 1413 | rtnl_lock(); |
| 1414 | strlcpy(name, netdev_name(net), len); |
| 1415 | rtnl_unlock(); |
| 1416 | return strlen(name); |
| 1417 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1418 | EXPORT_SYMBOL_GPL(gether_get_ifname); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1419 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1420 | /** |
| 1421 | * gether_cleanup - remove Ethernet-over-USB device |
| 1422 | * Context: may sleep |
| 1423 | * |
| 1424 | * This is called to free all resources allocated by @gether_setup(). |
| 1425 | */ |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1426 | void gether_cleanup(struct eth_dev *dev) |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1427 | { |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1428 | if (!dev) |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1429 | return; |
| 1430 | |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame^] | 1431 | uether_debugfs_exit(dev); |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1432 | unregister_netdev(dev->net); |
| 1433 | flush_work(&dev->work); |
| 1434 | free_netdev(dev->net); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1435 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1436 | EXPORT_SYMBOL_GPL(gether_cleanup); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1437 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1438 | /** |
| 1439 | * gether_connect - notify network layer that USB link is active |
| 1440 | * @link: the USB link, set up with endpoints, descriptors matching |
| 1441 | * current device speed, and any framing wrapper(s) set up. |
| 1442 | * Context: irqs blocked |
| 1443 | * |
| 1444 | * This is called to activate endpoints and let the network layer know |
| 1445 | * the connection is active ("carrier detect"). It may cause the I/O |
| 1446 | * queues to open and start letting network packets flow, but will in |
| 1447 | * any case activate the endpoints so that they respond properly to the |
| 1448 | * USB host. |
| 1449 | * |
| 1450 | * Verify net_device pointer returned using IS_ERR(). If it doesn't |
| 1451 | * indicate some error code (negative errno), ep->driver_data values |
| 1452 | * have been overwritten. |
| 1453 | */ |
| 1454 | struct net_device *gether_connect(struct gether *link) |
| 1455 | { |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1456 | struct eth_dev *dev = link->ioport; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1457 | int result = 0; |
| 1458 | |
| 1459 | if (!dev) |
| 1460 | return ERR_PTR(-EINVAL); |
| 1461 | |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1462 | if (link->in_ep) { |
| 1463 | link->in_ep->driver_data = dev; |
| 1464 | result = usb_ep_enable(link->in_ep); |
| 1465 | if (result != 0) { |
| 1466 | DBG(dev, "enable %s --> %d\n", |
| 1467 | link->in_ep->name, result); |
| 1468 | goto fail0; |
| 1469 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1470 | } |
| 1471 | |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1472 | if (link->out_ep) { |
| 1473 | link->out_ep->driver_data = dev; |
| 1474 | result = usb_ep_enable(link->out_ep); |
| 1475 | if (result != 0) { |
| 1476 | DBG(dev, "enable %s --> %d\n", |
| 1477 | link->out_ep->name, result); |
| 1478 | goto fail1; |
| 1479 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1480 | } |
| 1481 | |
| 1482 | if (result == 0) |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 1483 | result = alloc_requests(dev, link, qlen(dev->gadget, |
| 1484 | dev->qmult)); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1485 | |
| 1486 | if (result == 0) { |
| 1487 | dev->zlp = link->is_zlp_ok; |
Yoshihiro Shimoda | 05f6b0f | 2016-08-22 17:48:26 +0900 | [diff] [blame] | 1488 | dev->no_skb_reserve = link->no_skb_reserve; |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 1489 | DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult)); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1490 | |
| 1491 | dev->header_len = link->header_len; |
| 1492 | dev->unwrap = link->unwrap; |
| 1493 | dev->wrap = link->wrap; |
xerox_lin | 87bebf8 | 2014-08-14 14:48:44 +0800 | [diff] [blame] | 1494 | dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer; |
xerox_lin | cdffcb8 | 2014-09-04 16:01:59 +0800 | [diff] [blame] | 1495 | dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1496 | |
| 1497 | spin_lock(&dev->lock); |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 1498 | dev->tx_skb_hold_count = 0; |
| 1499 | dev->no_tx_req_used = 0; |
| 1500 | dev->tx_req_bufsize = 0; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1501 | dev->port_usb = link; |
David Brownell | 29bac7b | 2008-09-06 21:33:49 -0700 | [diff] [blame] | 1502 | if (netif_running(dev->net)) { |
| 1503 | if (link->open) |
| 1504 | link->open(link); |
| 1505 | } else { |
| 1506 | if (link->close) |
| 1507 | link->close(link); |
| 1508 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1509 | spin_unlock(&dev->lock); |
| 1510 | |
| 1511 | netif_carrier_on(dev->net); |
| 1512 | if (netif_running(dev->net)) |
| 1513 | eth_start(dev, GFP_ATOMIC); |
| 1514 | |
| 1515 | /* on error, disable any endpoints */ |
| 1516 | } else { |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1517 | if (link->out_ep) |
| 1518 | (void) usb_ep_disable(link->out_ep); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1519 | fail1: |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1520 | if (link->in_ep) |
| 1521 | (void) usb_ep_disable(link->in_ep); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1522 | } |
| 1523 | fail0: |
| 1524 | /* caller is responsible for cleanup on error */ |
| 1525 | if (result < 0) |
| 1526 | return ERR_PTR(result); |
| 1527 | return dev->net; |
| 1528 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1529 | EXPORT_SYMBOL_GPL(gether_connect); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1530 | |
| 1531 | /** |
| 1532 | * gether_disconnect - notify network layer that USB link is inactive |
| 1533 | * @link: the USB link, on which gether_connect() was called |
| 1534 | * Context: irqs blocked |
| 1535 | * |
| 1536 | * This is called to deactivate endpoints and let the network layer know |
| 1537 | * the connection went inactive ("no carrier"). |
| 1538 | * |
| 1539 | * On return, the state is as if gether_connect() had never been called. |
| 1540 | * The endpoints are inactive, and accordingly without active USB I/O. |
| 1541 | * Pointers to endpoint descriptors and endpoint private data are nulled. |
| 1542 | */ |
| 1543 | void gether_disconnect(struct gether *link) |
| 1544 | { |
| 1545 | struct eth_dev *dev = link->ioport; |
| 1546 | struct usb_request *req; |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 1547 | struct sk_buff *skb; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1548 | |
| 1549 | WARN_ON(!dev); |
| 1550 | if (!dev) |
| 1551 | return; |
| 1552 | |
| 1553 | DBG(dev, "%s\n", __func__); |
| 1554 | |
| 1555 | netif_stop_queue(dev->net); |
| 1556 | netif_carrier_off(dev->net); |
| 1557 | |
| 1558 | /* disable endpoints, forcing (synchronous) completion |
| 1559 | * of all pending i/o. then free the request objects |
| 1560 | * and forget about the endpoints. |
| 1561 | */ |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1562 | if (link->in_ep) { |
| 1563 | usb_ep_disable(link->in_ep); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1564 | spin_lock(&dev->req_lock); |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1565 | while (!list_empty(&dev->tx_reqs)) { |
| 1566 | req = container_of(dev->tx_reqs.next, |
| 1567 | struct usb_request, list); |
| 1568 | list_del(&req->list); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1569 | |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1570 | spin_unlock(&dev->req_lock); |
Rajkumar Raghupathy | 7d4a6cb | 2013-05-23 11:37:41 +0530 | [diff] [blame] | 1571 | if (link->multi_pkt_xfer) { |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1572 | kfree(req->buf); |
Rajkumar Raghupathy | 7d4a6cb | 2013-05-23 11:37:41 +0530 | [diff] [blame] | 1573 | req->buf = NULL; |
| 1574 | } |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1575 | usb_ep_free_request(link->in_ep, req); |
| 1576 | spin_lock(&dev->req_lock); |
| 1577 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1578 | spin_unlock(&dev->req_lock); |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1579 | link->in_ep->desc = NULL; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1580 | } |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 1581 | |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1582 | if (link->out_ep) { |
| 1583 | usb_ep_disable(link->out_ep); |
| 1584 | spin_lock(&dev->req_lock); |
| 1585 | while (!list_empty(&dev->rx_reqs)) { |
| 1586 | req = container_of(dev->rx_reqs.next, |
| 1587 | struct usb_request, list); |
| 1588 | list_del(&req->list); |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 1589 | |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1590 | spin_unlock(&dev->req_lock); |
| 1591 | usb_ep_free_request(link->out_ep, req); |
| 1592 | spin_lock(&dev->req_lock); |
| 1593 | } |
| 1594 | spin_unlock(&dev->req_lock); |
| 1595 | |
| 1596 | spin_lock(&dev->rx_frames.lock); |
| 1597 | while ((skb = __skb_dequeue(&dev->rx_frames))) |
| 1598 | dev_kfree_skb_any(skb); |
| 1599 | spin_unlock(&dev->rx_frames.lock); |
| 1600 | |
| 1601 | link->out_ep->desc = NULL; |
| 1602 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1603 | |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame^] | 1604 | pr_debug("%s(): tx_throttle count= %lu", __func__, |
| 1605 | dev->tx_throttle); |
| 1606 | /* reset tx_throttle count */ |
| 1607 | dev->tx_throttle = 0; |
| 1608 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1609 | /* finish forgetting about this USB link episode */ |
| 1610 | dev->header_len = 0; |
| 1611 | dev->unwrap = NULL; |
| 1612 | dev->wrap = NULL; |
| 1613 | |
| 1614 | spin_lock(&dev->lock); |
| 1615 | dev->port_usb = NULL; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1616 | spin_unlock(&dev->lock); |
| 1617 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1618 | EXPORT_SYMBOL_GPL(gether_disconnect); |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 1619 | |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame^] | 1620 | static int uether_stat_show(struct seq_file *s, void *unused) |
| 1621 | { |
| 1622 | struct eth_dev *dev = s->private; |
| 1623 | int ret = 0; |
| 1624 | |
| 1625 | if (dev) |
| 1626 | seq_printf(s, "tx_throttle = %lu\n", dev->tx_throttle); |
| 1627 | return ret; |
| 1628 | } |
| 1629 | |
| 1630 | static int uether_open(struct inode *inode, struct file *file) |
| 1631 | { |
| 1632 | return single_open(file, uether_stat_show, inode->i_private); |
| 1633 | } |
| 1634 | |
| 1635 | static ssize_t uether_stat_reset(struct file *file, |
| 1636 | const char __user *ubuf, size_t count, loff_t *ppos) |
| 1637 | { |
| 1638 | struct seq_file *s = file->private_data; |
| 1639 | struct eth_dev *dev = s->private; |
| 1640 | unsigned long flags; |
| 1641 | |
| 1642 | spin_lock_irqsave(&dev->lock, flags); |
| 1643 | /* Reset tx_throttle */ |
| 1644 | dev->tx_throttle = 0; |
| 1645 | spin_unlock_irqrestore(&dev->lock, flags); |
| 1646 | return count; |
| 1647 | } |
| 1648 | |
| 1649 | static const struct file_operations uether_stats_ops = { |
| 1650 | .open = uether_open, |
| 1651 | .read = seq_read, |
| 1652 | .write = uether_stat_reset, |
| 1653 | }; |
| 1654 | |
| 1655 | static void uether_debugfs_init(struct eth_dev *dev) |
| 1656 | { |
| 1657 | struct dentry *uether_dent; |
| 1658 | struct dentry *uether_dfile; |
| 1659 | |
| 1660 | uether_dent = debugfs_create_dir("uether_rndis", NULL); |
| 1661 | if (IS_ERR(uether_dent)) |
| 1662 | return; |
| 1663 | dev->uether_dent = uether_dent; |
| 1664 | |
| 1665 | uether_dfile = debugfs_create_file("status", 0644, |
| 1666 | uether_dent, dev, &uether_stats_ops); |
| 1667 | if (!uether_dfile || IS_ERR(uether_dfile)) |
| 1668 | debugfs_remove(uether_dent); |
| 1669 | dev->uether_dfile = uether_dfile; |
| 1670 | } |
| 1671 | |
| 1672 | static void uether_debugfs_exit(struct eth_dev *dev) |
| 1673 | { |
| 1674 | debugfs_remove(dev->uether_dfile); |
| 1675 | debugfs_remove(dev->uether_dent); |
| 1676 | dev->uether_dent = NULL; |
| 1677 | dev->uether_dfile = NULL; |
| 1678 | } |
| 1679 | |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 1680 | static int __init gether_init(void) |
| 1681 | { |
| 1682 | uether_wq = create_singlethread_workqueue("uether"); |
| 1683 | if (!uether_wq) { |
| 1684 | pr_err("%s: Unable to create workqueue: uether\n", __func__); |
| 1685 | return -ENOMEM; |
| 1686 | } |
| 1687 | return 0; |
| 1688 | } |
| 1689 | module_init(gether_init); |
| 1690 | |
| 1691 | static void __exit gether_exit(void) |
| 1692 | { |
| 1693 | destroy_workqueue(uether_wq); |
| 1694 | |
| 1695 | } |
| 1696 | module_exit(gether_exit); |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 1697 | MODULE_AUTHOR("David Brownell"); |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 1698 | MODULE_DESCRIPTION("ethernet over USB driver"); |
| 1699 | MODULE_LICENSE("GPL v2"); |