/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "u_ether.h"

/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code, such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes.  Set the max size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling.
 */
#define GETHER_MAX_ETH_FRAME_LEN 15412
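/* (15 * 1024) + 52 = 15412; with the bookkeeping overhead added by
 * alloc_skb(), this keeps each rx buffer within a 16k allocation
 * instead of rounding up to a 32k block.
 */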

static struct workqueue_struct	*uether_wq;

/* Extra buffer size to allocate for tx */
#define EXTRA_ALLOCATION_SIZE_U_ETH	128

struct eth_dev {
	/* lock is held while accessing port_usb */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	unsigned		tx_qlen;
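	/* TX completion interrupts are moderated: tx_complete() and
	 * eth_start_xmit() request an interrupt only on every
	 * MAX_TX_REQ_WITH_NO_INT-th queued request and leave no_interrupt
	 * set on the rest, so the UDC raises roughly one tx IRQ per five
	 * requests instead of one per request.
	 */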
	/* Minimum number of TX USB requests queued to UDC */
#define MAX_TX_REQ_WITH_NO_INT	5
	int			no_tx_req_used;
	int			tx_skb_hold_count;
	u32			tx_req_bufsize;

	struct sk_buff_head	rx_frames;

	unsigned		qmult;

	unsigned		header_len;
	unsigned		ul_max_pkts_per_xfer;
	unsigned		dl_max_pkts_per_xfer;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;
	struct work_struct	rx_work;

	unsigned long		todo;
	unsigned long		flags;
	unsigned short		rx_needed_headroom;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	bool			no_skb_reserve;
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
	unsigned long		tx_throttle;
	unsigned long		rx_throttle;
	unsigned int		tx_pkts_rcvd;
	unsigned long		skb_expand_cnt;
	struct dentry		*uether_dent;
	struct dentry		*uether_dfile;
};

static void uether_debugfs_init(struct eth_dev *dev);
static void uether_debugfs_exit(struct eth_dev *dev);

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/*
 * Usually downlink rates are higher than uplink rates and the downlink
 * deserves a higher number of requests.  For CAT-6 data rates of
 * 300 Mbps (~30 packets per millisecond), 40 USB requests may not be
 * sufficient.  At this rate, and with interrupt moderation on the
 * interconnect, data can be very bursty.  tx_qmult is the additional
 * multiplier on qmult.
 */
static unsigned int tx_qmult = 2;
module_param(tx_qmult, uint, 0644);
MODULE_PARM_DESC(tx_qmult, "Additional queue length multiplier for tx");

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
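
/* Example: with a typical qmult of 5, a high- or super-speed link keeps
 * 5 * DEFAULT_QLEN = 10 rx requests outstanding (tx additionally scales
 * by tx_qmult, see alloc_requests()), while full speed stays at 2.
 */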

/*-------------------------------------------------------------------------*/
#define U_ETHER_RX_PENDING_TSHOLD 500

static unsigned int u_ether_rx_pending_thld = U_ETHER_RX_PENDING_TSHOLD;
module_param(u_ether_rx_pending_thld, uint, 0644);

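/* When more than this many unprocessed frames are sitting in
 * dev->rx_frames, rx_complete() stops resubmitting requests inline and
 * parks them on rx_reqs instead, leaving the refill to process_rx_w()
 * once the backlog drains (counted in rx_throttle).
 */
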
/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	if (new_mtu <= ETH_HLEN || new_mtu > GETHER_MAX_ETH_FRAME_LEN)
		return -ERANGE;
	net->mtu = new_mtu;

	return 0;
}

static int ueth_change_mtu_ip(struct net_device *net, int new_mtu)
{
	struct eth_dev *dev = netdev_priv(net);
	unsigned long flags;
	int status = 0;

	spin_lock_irqsave(&dev->lock, flags);
	if (new_mtu <= 0) {
		status = -EINVAL;
	} else {
		/* log before updating, so "old" really is the old MTU */
		DBG(dev, "[%s] MTU change: old=%d new=%d\n", net->name,
				net->mtu, new_mtu);
		net->mtu = new_mtu;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;

	if (!out) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOTCONN;
	}

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->ul_max_pkts_per_xfer)
		size *= dev->ul_max_pkts_per_xfer;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
	spin_unlock_irqrestore(&dev->lock, flags);

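	/* Example, assuming a 1500 byte MTU on a high-speed link (512 byte
	 * bulk maxpacket) with no aggregation and no extra header: 14 +
	 * 1500 + 20 = 1534 rounds up to 1536 (3 * 512), so one request can
	 * absorb a full frame plus host-side padding without overflowing.
	 */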
	DBG(dev, "%s: size: %zd\n", __func__, size);
	skb = alloc_skb(size, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, 0);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;
	bool		queue = 0;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
				if (status == -EINVAL)
					dev->net->stats.rx_errors++;
				else if (status == -EOVERFLOW)
					dev->net->stats.rx_over_errors++;
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		if (!status)
			queue = 1;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		queue = 1;
		dev_kfree_skb_any(skb);
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

clean:
	if (queue && dev->rx_frames.qlen <= u_ether_rx_pending_thld) {
		if (rx_submit(dev, req, GFP_ATOMIC) < 0) {
			spin_lock(&dev->req_lock);
			list_add(&req->list, &dev->rx_reqs);
			spin_unlock(&dev->req_lock);
		}
	} else {
		/* rx buffer draining is delayed, so defer further
		 * queuing to the workqueue
		 */
		if (queue)
			dev->rx_throttle++;
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
	}

	if (queue)
		queue_work(uether_wq, &dev->rx_work);
}


static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}


static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int status = 0;

	spin_lock(&dev->req_lock);
	if (link->in_ep) {
		status = prealloc(&dev->tx_reqs, link->in_ep, n * tx_qmult);
		if (status < 0)
			goto fail;
	}

	if (link->out_ep) {
		status = prealloc(&dev->rx_reqs, link->out_ep, n);
		if (status < 0)
			goto fail;
	}
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

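/* alloc_requests() runs at link activation with n = qlen(); rx gets n
 * requests and tx gets n * tx_qmult, so neither hot path has to
 * allocate usb_requests while traffic is flowing.
 */
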
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;
	int			req_cnt = 0;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		/* break the nexus of continuous completion and re-submission */
		if (++req_cnt > qlen(dev->gadget, dev->qmult))
			break;

		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			spin_lock_irqsave(&dev->req_lock, flags);
			list_add(&req->list, &dev->rx_reqs);
			spin_unlock_irqrestore(&dev->req_lock, flags);
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

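/* Deferred rx processing: frames queued by rx_complete() are validated
 * and pushed up the stack from workqueue context via netif_rx_ni(), and
 * the rx queue is then refilled with GFP_KERNEL allocations.
 */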
static void process_rx_w(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff	*skb;
	int		status = 0;

	if (!dev->port_usb)
		return;

	while ((skb = skb_dequeue(&dev->rx_frames))) {
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			dev_kfree_skb_any(skb);
			continue;
		}
		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

		status = netif_rx_ni(skb);
	}

	if (netif_running(dev->net))
		rx_fill(dev, GFP_KERNEL);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

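/* On the aggregated (multi_pkt_xfer) path, a completion both recycles
 * the finished request and, unless no_interrupt moderation suppressed
 * it, immediately queues the next partially filled request so the IN
 * endpoint keeps streaming while packets are pending.
 */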
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	struct net_device *net = dev->net;
	struct usb_request *new_req;
	struct usb_ep *in;
	int length;
	int retval;

	if (!dev->port_usb) {
		usb_ep_free_request(ep, req);
		return;
	}

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		if (!req->zero)
			dev->net->stats.tx_bytes += req->actual - 1;
		else
			dev->net->stats.tx_bytes += req->actual;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);

	if (dev->port_usb->multi_pkt_xfer && !req->context) {
		dev->no_tx_req_used--;
		req->length = 0;
		in = dev->port_usb->in_ep;

		/* Do not process further if no_interrupt is set */
		if (!req->no_interrupt && !list_empty(&dev->tx_reqs)) {
			new_req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
			list_del(&new_req->list);
			spin_unlock(&dev->req_lock);
			if (new_req->length > 0) {
				length = new_req->length;

				/* NCM requires no zlp if transfer is
				 * dwNtbInMaxSize
				 */
				if (dev->port_usb->is_fixed &&
					length == dev->port_usb->fixed_in_len &&
					(length % in->maxpacket) == 0)
					new_req->zero = 0;
				else
					new_req->zero = 1;

				/* use zlp framing on tx for strict CDC-Ether
				 * conformance, though any robust network rx
				 * path ignores extra padding. and some hardware
				 * doesn't like to write zlps.
				 */
				if (new_req->zero && !dev->zlp &&
						(length % in->maxpacket) == 0) {
					new_req->zero = 0;
					length++;
				}

				/* set when tx completion interrupt needed */
				spin_lock(&dev->req_lock);
				dev->tx_qlen++;
				if (dev->tx_qlen == MAX_TX_REQ_WITH_NO_INT) {
					new_req->no_interrupt = 0;
					dev->tx_qlen = 0;
				} else {
					new_req->no_interrupt = 1;
				}
				spin_unlock(&dev->req_lock);
				new_req->length = length;
				new_req->complete = tx_complete;
				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
				switch (retval) {
				default:
					DBG(dev, "tx queue err %d\n", retval);
					new_req->length = 0;
					spin_lock(&dev->req_lock);
					list_add_tail(&new_req->list,
							&dev->tx_reqs);
					spin_unlock(&dev->req_lock);
					break;
				case 0:
					spin_lock(&dev->req_lock);
					dev->no_tx_req_used++;
					spin_unlock(&dev->req_lock);
					netif_trans_update(net);
				}
			} else {
				spin_lock(&dev->req_lock);
				/*
				 * Put the idle request at the back of the
				 * queue. The xmit function will put the
				 * unfinished request at the beginning of the
				 * queue.
				 */
				list_add_tail(&new_req->list, &dev->tx_reqs);
				spin_unlock(&dev->req_lock);
			}
		} else {
			spin_unlock(&dev->req_lock);
		}
	} else {
		/* Is aggregation already enabled and buffers allocated? */
		if (dev->port_usb->multi_pkt_xfer && dev->tx_req_bufsize) {
			req->buf = kzalloc(dev->tx_req_bufsize
				+ dev->gadget->extra_buf_alloc, GFP_ATOMIC);
			req->context = NULL;
		} else {
			req->buf = NULL;
		}

		spin_unlock(&dev->req_lock);
		dev_kfree_skb_any(skb);
	}

	/* put the completed req back to tx_reqs tail pool */
	spin_lock(&dev->req_lock);
	list_add_tail(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}


static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static int alloc_tx_buffer(struct eth_dev *dev)
{
	struct list_head	*act;
	struct usb_request	*req;

	dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
				(dev->net->mtu
				+ sizeof(struct ethhdr)
				/* size of rndis_packet_msg_type */
				+ 44
				+ 22));

	list_for_each(act, &dev->tx_reqs) {
		req = container_of(act, struct usb_request, list);
		if (!req->buf)
			req->buf = kmalloc(dev->tx_req_bufsize
				+ dev->gadget->extra_buf_alloc, GFP_ATOMIC);

		if (!req->buf)
			goto free_buf;

		/* req->context is not used for multi_pkt_xfers */
		req->context = NULL;
	}
	return 0;

free_buf:
	/* tx_req_bufsize = 0 retries mem alloc on next eth_start_xmit */
	dev->tx_req_bufsize = 0;
	list_for_each(act, &dev->tx_reqs) {
		req = container_of(act, struct usb_request, list);
		kfree(req->buf);
		req->buf = NULL;
	}
	return -ENOMEM;
}
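
/* Example, assuming dl_max_pkts_per_xfer = 10 and a 1500 byte MTU: each
 * tx request buffer is 10 * (1500 + 14 + 44 + 22) = 15800 bytes, enough
 * to aggregate ten RNDIS-wrapped frames into a single transfer.
 */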

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			tail_room = 0;
	int			extra_alloc = 0;
	int			retval;
	struct usb_request	*req = NULL;
	struct sk_buff		*new_skb;
	unsigned long		flags;
	struct usb_ep		*in = NULL;
	u16			cdc_filter = 0;
	bool			multi_pkt_xfer = false;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
		multi_pkt_xfer = dev->port_usb->multi_pkt_xfer;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (skb && !in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	dev->tx_pkts_rcvd++;
	/*
	 * No buffer copies are needed, unless the network stack did it
	 * or the hardware can't use skb buffers, or there's not enough
	 * space for the extra headers we need.
	 */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->wrap && dev->port_usb)
		skb = dev->wrap(dev->port_usb, skb);
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!skb) {
		if (dev->port_usb && dev->port_usb->supports_multi_frame) {
			/*
			 * Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
		} else {
			dev->net->stats.tx_dropped++;
		}

		/* no error code for dropped packets */
		return NETDEV_TX_OK;
	}

	/* Allocate memory for tx_reqs to support multi packet transfer */
	spin_lock_irqsave(&dev->req_lock, flags);
	if (multi_pkt_xfer && !dev->tx_req_bufsize) {
		retval = alloc_tx_buffer(dev);
		if (retval < 0) {
			/* ndo_start_xmit must not return an errno; drop
			 * the packet instead of leaking the skb.
			 */
			spin_unlock_irqrestore(&dev->req_lock, flags);
			dev_kfree_skb_any(skb);
			dev->net->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	}

	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs)) {
		/*
		 * tx_throttle gives info about number of times u_ether
		 * asked network layer to stop queueing packets to it
		 * when transmit resources are unavailable
		 */
		dev->tx_throttle++;
		netif_stop_queue(net);
	}

	dev->tx_skb_hold_count++;
	spin_unlock_irqrestore(&dev->req_lock, flags);

	if (multi_pkt_xfer) {
		memcpy(req->buf + req->length, skb->data, skb->len);
		req->length = req->length + skb->len;
		length = req->length;
		dev_kfree_skb_any(skb);

		spin_lock_irqsave(&dev->req_lock, flags);
		if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
			/*
			 * should allow aggregation only, if the number of
			 * requests queued more than the tx requests that can
			 * be queued with no interrupt flag set sequentially.
			 * Otherwise, packets may be blocked forever.
			 */
			if (dev->no_tx_req_used > MAX_TX_REQ_WITH_NO_INT) {
				list_add(&req->list, &dev->tx_reqs);
				spin_unlock_irqrestore(&dev->req_lock, flags);
				goto success;
			}
		}

		dev->no_tx_req_used++;
		spin_unlock_irqrestore(&dev->req_lock, flags);

		spin_lock_irqsave(&dev->lock, flags);
		dev->tx_skb_hold_count = 0;
		spin_unlock_irqrestore(&dev->lock, flags);
	} else {
		bool do_align = false;

		/* Check if TX buffer should be aligned before queuing to hw */
		if (dev->gadget->is_chipidea &&
			!IS_ALIGNED((size_t)skb->data, 4))
			do_align = true;

		/*
		 * Some UDCs require allocation of some extra bytes for
		 * the TX buffer due to hardware requirements. Check if the
		 * extra bytes are already there, otherwise allocate a new
		 * buffer with extra bytes and memcpy to align the skb too.
		 */
		if (dev->gadget->extra_buf_alloc)
			extra_alloc = EXTRA_ALLOCATION_SIZE_U_ETH;
		tail_room = skb_tailroom(skb);
		if (do_align || tail_room < extra_alloc) {
			pr_debug("%s: align skb and update tail_room %d to %d\n",
					__func__, tail_room, extra_alloc);
			tail_room = extra_alloc;
			new_skb = skb_copy_expand(skb, 0, tail_room,
					GFP_ATOMIC);
			if (!new_skb) {
				/* drop the packet and recycle the request
				 * rather than returning an errno and
				 * leaking both
				 */
				dev_kfree_skb_any(skb);
				dev->net->stats.tx_dropped++;
				spin_lock_irqsave(&dev->req_lock, flags);
				if (list_empty(&dev->tx_reqs))
					netif_start_queue(net);
				list_add(&req->list, &dev->tx_reqs);
				spin_unlock_irqrestore(&dev->req_lock, flags);
				return NETDEV_TX_OK;
			}
			dev_kfree_skb_any(skb);
			skb = new_skb;
			dev->skb_expand_cnt++;
		}

		length = skb->len;
		req->buf = skb->data;
		req->context = skb;
	}
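
	/* The payload is now staged for either path: aggregated frames
	 * copied into the request's own buffer (req->context stays NULL),
	 * or zero-copy transmission of the skb data (req->context == skb).
	 */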

	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
	    dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
		req->zero = 0;
		length++;
	}

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget) &&
	    (dev->gadget->speed == USB_SPEED_HIGH)) {
		spin_lock_irqsave(&dev->req_lock, flags);
		dev->tx_qlen++;
		if (dev->tx_qlen == MAX_TX_REQ_WITH_NO_INT) {
			req->no_interrupt = 0;
			dev->tx_qlen = 0;
		} else {
			req->no_interrupt = 1;
		}
		spin_unlock_irqrestore(&dev->req_lock, flags);
	} else {
		req->no_interrupt = 0;
	}

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
	}

	if (retval) {
		if (!multi_pkt_xfer)
			dev_kfree_skb_any(skb);
		else
			req->length = 0;
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
success:
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	dev->tx_qlen = 0;
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		if (link->in_ep) {
			in = link->in_ep->desc;
			usb_ep_disable(link->in_ep);
			if (netif_carrier_ok(net)) {
				DBG(dev, "host still using in endpoints\n");
				link->in_ep->desc = in;
				usb_ep_enable(link->in_ep);
			}
		}

		if (link->out_ep) {
			out = link->out_ep->desc;
			usb_ep_disable(link->out_ep);
			if (netif_carrier_ok(net)) {
				DBG(dev, "host still using out endpoints\n");
				link->out_ep->desc = out;
				usb_ep_enable(link->out_ep);
			}
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}
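
/* Accepts "00:11:22:33:44:55" or dot-separated hex; a parse that yields
 * an invalid address (all zeros, or the multicast bit set) falls through
 * to eth_random_addr(), and the nonzero return value tells callers a
 * random address was substituted.
 */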

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%pM", dev_addr);
	return 18;
}
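
/* 18 bytes: "xx:xx:xx:xx:xx:xx" is 17 characters plus a terminating NUL. */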

static int ether_ioctl(struct net_device *, struct ifreq *, int);

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_do_ioctl		= ether_ioctl,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static const struct net_device_ops eth_netdev_ops_ip = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_do_ioctl		= ether_ioctl,
	.ndo_change_mtu		= ueth_change_mtu_ip,
	.ndo_set_mac_address	= NULL,
	.ndo_validate_addr	= NULL,
};
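
/* In RAWIP (LLP_IP) mode the link carries no Ethernet header and has no
 * MAC address, so the MAC-related hooks are left NULL and MTU checking
 * is relaxed; see ueth_change_mtu_ip() and RMNET_IOCTL_SET_LLP_IP below.
 */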
| 1099 | |
| 1100 | static int rmnet_ioctl_extended(struct net_device *dev, struct ifreq *ifr) |
| 1101 | { |
| 1102 | struct rmnet_ioctl_extended_s ext_cmd; |
| 1103 | struct eth_dev *eth_dev = netdev_priv(dev); |
| 1104 | int rc = 0; |
| 1105 | |
| 1106 | rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data, |
| 1107 | sizeof(struct rmnet_ioctl_extended_s)); |
| 1108 | |
| 1109 | if (rc) { |
| 1110 | DBG(eth_dev, "%s(): copy_from_user() failed\n", __func__); |
| 1111 | return rc; |
| 1112 | } |
| 1113 | |
| 1114 | switch (ext_cmd.extended_ioctl) { |
| 1115 | case RMNET_IOCTL_GET_SUPPORTED_FEATURES: |
| 1116 | ext_cmd.u.data = 0; |
| 1117 | break; |
| 1118 | |
| 1119 | case RMNET_IOCTL_SET_MRU: |
| 1120 | if (netif_running(dev)) |
| 1121 | return -EBUSY; |
| 1122 | |
| 1123 | /* 16K max */ |
| 1124 | if ((size_t)ext_cmd.u.data > 0x4000) |
| 1125 | return -EINVAL; |
| 1126 | |
| 1127 | if (eth_dev->port_usb) { |
| 1128 | eth_dev->port_usb->is_fixed = true; |
| 1129 | eth_dev->port_usb->fixed_out_len = |
| 1130 | (size_t) ext_cmd.u.data; |
| 1131 | DBG(eth_dev, "[%s] rmnet_ioctl(): SET MRU to %u\n", |
| 1132 | dev->name, eth_dev->port_usb->fixed_out_len); |
| 1133 | } else { |
| 1134 | pr_err("[%s]: %s: SET MRU failed. Cable disconnected\n", |
| 1135 | dev->name, __func__); |
| 1136 | return -ENODEV; |
| 1137 | } |
| 1138 | break; |
| 1139 | |
| 1140 | case RMNET_IOCTL_GET_MRU: |
| 1141 | if (eth_dev->port_usb) { |
| 1142 | ext_cmd.u.data = eth_dev->port_usb->is_fixed ? |
| 1143 | eth_dev->port_usb->fixed_out_len : |
| 1144 | dev->mtu; |
| 1145 | } else { |
| 1146 | pr_err("[%s]: %s: GET MRU failed. Cable disconnected\n", |
| 1147 | dev->name, __func__); |
| 1148 | return -ENODEV; |
| 1149 | } |
| 1150 | break; |
| 1151 | |
| 1152 | case RMNET_IOCTL_GET_DRIVER_NAME: |
| 1153 | strlcpy(ext_cmd.u.if_name, dev->name, |
| 1154 | sizeof(ext_cmd.u.if_name)); |
| 1155 | break; |
| 1156 | |
| 1157 | default: |
| 1158 | break; |
| 1159 | } |
| 1160 | |
| 1161 | rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd, |
| 1162 | sizeof(struct rmnet_ioctl_extended_s)); |
| 1163 | |
| 1164 | if (rc) |
| 1165 | DBG(eth_dev, "%s(): copy_to_user() failed\n", __func__); |
| 1166 | return rc ? -EFAULT : 0; |
| 1167 | } |
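| | /* |
| |  * Illustrative user-space sketch of the extended-ioctl handshake above |
| |  * (assumes the msm_rmnet.h ABI; error handling elided): |
| |  * |
| |  *	struct ifreq ifr = {0}; |
| |  *	struct rmnet_ioctl_extended_s ext = {0}; |
| |  *	int fd = socket(AF_INET, SOCK_DGRAM, 0); |
| |  * |
| |  *	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "usb0"); |
| |  *	ext.extended_ioctl = RMNET_IOCTL_GET_MRU; |
| |  *	ifr.ifr_ifru.ifru_data = &ext; |
| |  *	if (ioctl(fd, RMNET_IOCTL_EXTENDED, &ifr) == 0) |
| |  *		printf("MRU: %u\n", ext.u.data); |
| |  */ |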
| 1168 | |
| 1169 | static int ether_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
| 1170 | { |
| 1171 | struct eth_dev *eth_dev = netdev_priv(dev); |
| 1172 | void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; |
| 1173 | int prev_mtu = dev->mtu; |
| 1174 | u32 state, old_opmode; |
| 1175 | int rc = -EFAULT; |
| 1176 | |
| 1177 | old_opmode = eth_dev->flags; |
| 1178 | /* Process IOCTL command */ |
| 1179 | switch (cmd) { |
| 1180 | case RMNET_IOCTL_SET_LLP_ETHERNET: /* Set Ethernet protocol */ |
| 1181 | /* Perform Ethernet config only if in IP mode currently */ |
| 1182 | if (test_bit(RMNET_MODE_LLP_IP, ð_dev->flags)) { |
| 1183 | ether_setup(dev); |
| 1184 | dev->mtu = prev_mtu; |
| 1185 | dev->netdev_ops = ð_netdev_ops; |
| 1186 | clear_bit(RMNET_MODE_LLP_IP, ð_dev->flags); |
| 1187 | set_bit(RMNET_MODE_LLP_ETH, ð_dev->flags); |
| 1188 | DBG(eth_dev, "[%s] ioctl(): set Ethernet proto mode\n", |
| 1189 | dev->name); |
| 1190 | } |
| 1191 | if (test_bit(RMNET_MODE_LLP_ETH, ð_dev->flags)) |
| 1192 | rc = 0; |
| 1193 | break; |
| 1194 | |
| 1195 | case RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP protocol */ |
| 1196 | /* Perform IP config only if in Ethernet mode currently */ |
| 1197 | if (test_bit(RMNET_MODE_LLP_ETH, ð_dev->flags)) { |
| 1198 | /* Undo config done in ether_setup() */ |
| 1199 | dev->header_ops = NULL; /* No header */ |
| 1200 | dev->type = ARPHRD_RAWIP; |
| 1201 | dev->hard_header_len = 0; |
| 1202 | dev->mtu = prev_mtu; |
| 1203 | dev->addr_len = 0; |
| 1204 | dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); |
| 1205 | dev->netdev_ops = ð_netdev_ops_ip; |
| 1206 | clear_bit(RMNET_MODE_LLP_ETH, ð_dev->flags); |
| 1207 | set_bit(RMNET_MODE_LLP_IP, ð_dev->flags); |
| 1208 | DBG(eth_dev, "[%s] ioctl(): set IP protocol mode\n", |
| 1209 | dev->name); |
| 1210 | } |
| 1211 | if (test_bit(RMNET_MODE_LLP_IP, ð_dev->flags)) |
| 1212 | rc = 0; |
| 1213 | break; |
| 1214 | |
| 1215 | case RMNET_IOCTL_GET_LLP: /* Get link protocol state */ |
| 1216 | state = eth_dev->flags & (RMNET_MODE_LLP_ETH |
| 1217 | | RMNET_MODE_LLP_IP); |
| 1218 | if (copy_to_user(addr, &state, sizeof(state))) |
| 1219 | break; |
| 1220 | rc = 0; |
| 1221 | break; |
| 1222 | |
| 1223 | case RMNET_IOCTL_SET_RX_HEADROOM: /* Set RX headroom */ |
| 1224 | if (copy_from_user(ð_dev->rx_needed_headroom, addr, |
| 1225 | sizeof(eth_dev->rx_needed_headroom))) |
| 1226 | break; |
| 1227 | DBG(eth_dev, "[%s] ioctl(): set RX HEADROOM: %x\n", |
| 1228 | dev->name, eth_dev->rx_needed_headroom); |
| 1229 | rc = 0; |
| 1230 | break; |
| 1231 | |
| 1232 | case RMNET_IOCTL_EXTENDED: |
| 1233 | rc = rmnet_ioctl_extended(dev, ifr); |
| 1234 | break; |
| 1235 | |
| 1236 | default: |
| 1237 | pr_err("[%s] error: ioctl called for unsupported cmd[%d]\n", |
| 1238 | dev->name, cmd); |
| 1239 | rc = -EINVAL; |
| 1240 | } |
| 1241 | |
| 1242 | DBG(eth_dev, "[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08lx\n", |
| 1243 | dev->name, __func__, cmd, old_opmode, eth_dev->flags); |
| 1244 | |
| 1245 | return rc; |
| 1246 | } |
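| | /* |
| |  * Likewise, switching the link protocol is a plain private ioctl on the |
| |  * interface (sketch; constants come from msm_rmnet.h): |
| |  * |
| |  *	ioctl(fd, RMNET_IOCTL_SET_LLP_IP, &ifr);	(raw-IP framing) |
| |  *	ioctl(fd, RMNET_IOCTL_SET_LLP_ETHERNET, &ifr);	(back to 802.3) |
| |  */ |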
| 1247 | |
Marcel Holtmann | aa79074 | 2010-01-15 22:13:58 -0800 | [diff] [blame] | 1248 | static struct device_type gadget_type = { |
| 1249 | .name = "gadget", |
| 1250 | }; |
| 1251 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1252 | /** |
Mike Lockwood | 036e98b | 2012-05-10 10:08:02 +0200 | [diff] [blame] | 1253 | * gether_setup_name - initialize one ethernet-over-usb link |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1254 |  * @g: gadget to associate with these links |
| |  * @dev_addr: NULL, or a string with the "self" (device-side) Ethernet |
| |  *	address; an invalid or missing string selects a random address |
| |  * @host_addr: NULL, or a string with the host-side Ethernet address; |
| |  *	an invalid or missing string selects a random address |
| 1255 |  * @ethaddr: NULL, or a buffer in which the ethernet address of the |
| 1256 |  *	host side of the link is recorded |
| |  * @qmult: queue length multiplier for high and super speed links |
Mike Lockwood | 036e98b | 2012-05-10 10:08:02 +0200 | [diff] [blame] | 1257 |  * @netname: name for network device (for example, "usb") |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1258 | * Context: may sleep |
| 1259 | * |
| 1260 | * This sets up the single network link that may be exported by a |
| 1261 | * gadget driver using this framework. The link layer addresses are |
| 1262 | * set up using module parameters. |
| 1263 | * |
Dan Carpenter | 574f24f | 2013-11-14 11:42:11 +0300 | [diff] [blame] | 1264 | * Returns an eth_dev pointer on success, or an ERR_PTR on failure. |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1265 | */ |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 1266 | struct eth_dev *gether_setup_name(struct usb_gadget *g, |
| 1267 | const char *dev_addr, const char *host_addr, |
| 1268 | u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname) |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1269 | { |
| 1270 | struct eth_dev *dev; |
| 1271 | struct net_device *net; |
| 1272 | int status; |
| 1273 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1274 | net = alloc_etherdev(sizeof *dev); |
| 1275 | if (!net) |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1276 | return ERR_PTR(-ENOMEM); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1277 | |
| 1278 | dev = netdev_priv(net); |
| 1279 | spin_lock_init(&dev->lock); |
| 1280 | spin_lock_init(&dev->req_lock); |
| 1281 | INIT_WORK(&dev->work, eth_work); |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 1282 | INIT_WORK(&dev->rx_work, process_rx_w); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1283 | INIT_LIST_HEAD(&dev->tx_reqs); |
| 1284 | INIT_LIST_HEAD(&dev->rx_reqs); |
| 1285 | |
Brian Niebuhr | 9b39e9d | 2009-08-14 10:04:22 -0500 | [diff] [blame] | 1286 | skb_queue_head_init(&dev->rx_frames); |
| 1287 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1288 | /* network device setup */ |
| 1289 | dev->net = net; |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 1290 | dev->qmult = qmult; |
Mike Lockwood | 036e98b | 2012-05-10 10:08:02 +0200 | [diff] [blame] | 1291 | snprintf(net->name, sizeof(net->name), "%s%%d", netname); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1292 | |
| 1293 | if (get_ether_addr(dev_addr, net->dev_addr)) |
| 1294 | dev_warn(&g->dev, |
| 1295 | "using random %s ethernet address\n", "self"); |
| 1296 | if (get_ether_addr(host_addr, dev->host_mac)) |
| 1297 | dev_warn(&g->dev, |
| 1298 | "using random %s ethernet address\n", "host"); |
| 1299 | |
| 1300 | if (ethaddr) |
| 1301 | memcpy(ethaddr, dev->host_mac, ETH_ALEN); |
| 1302 | |
Stephen Hemminger | 5ec38f3 | 2009-01-07 18:05:39 -0800 | [diff] [blame] | 1303 | net->netdev_ops = ð_netdev_ops; |
| 1304 | |
Wilfried Klaebe | 7ad24ea | 2014-05-11 00:12:32 +0000 | [diff] [blame] | 1305 | net->ethtool_ops = &ops; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1306 | |
Hemant Kumar | fc49dbd | 2018-01-23 12:08:47 -0800 | [diff] [blame] | 1307 | /* set operation mode to eth by default */ |
| 1308 | set_bit(RMNET_MODE_LLP_ETH, &dev->flags); |
| 1309 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1310 | dev->gadget = g; |
| 1311 | SET_NETDEV_DEV(net, &g->dev); |
Marcel Holtmann | aa79074 | 2010-01-15 22:13:58 -0800 | [diff] [blame] | 1312 | SET_NETDEV_DEVTYPE(net, &gadget_type); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1313 | |
| 1314 | status = register_netdev(net); |
| 1315 | if (status < 0) { |
| 1316 | dev_dbg(&g->dev, "register_netdev failed, %d\n", status); |
| 1317 | free_netdev(net); |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1318 | dev = ERR_PTR(status); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1319 | } else { |
Johannes Berg | e174961 | 2008-10-27 15:59:26 -0700 | [diff] [blame] | 1320 | INFO(dev, "MAC %pM\n", net->dev_addr); |
| 1321 | INFO(dev, "HOST MAC %pM\n", dev->host_mac); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1322 | |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1323 | /* |
| 1324 | * two kinds of host-initiated state changes: |
Kevin Cernekee | 31bde1c | 2012-06-24 21:11:22 -0700 | [diff] [blame] | 1325 | * - iff DATA transfer is active, carrier is "on" |
| 1326 | * - tx queueing enabled if open *and* carrier is "on" |
| 1327 | */ |
| 1328 | netif_carrier_off(net); |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame] | 1329 | uether_debugfs_init(dev); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1330 | } |
| 1331 | |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1332 | return dev; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1333 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1334 | EXPORT_SYMBOL_GPL(gether_setup_name); |
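| | /* |
| |  * Minimal usage sketch for a legacy (non-configfs) gadget driver; "g" |
| |  * is the caller's usb_gadget, and NULL address strings request random |
| |  * MACs: |
| |  * |
| |  *	u8 hostaddr[ETH_ALEN]; |
| |  *	struct eth_dev *edev; |
| |  * |
| |  *	edev = gether_setup_name(g, NULL, NULL, hostaddr, QMULT_DEFAULT, |
| |  *				 "usb"); |
| |  *	if (IS_ERR(edev)) |
| |  *		return PTR_ERR(edev); |
| |  */ |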
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1335 | |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1336 | struct net_device *gether_setup_name_default(const char *netname) |
| 1337 | { |
| 1338 | struct net_device *net; |
| 1339 | struct eth_dev *dev; |
| 1340 | |
| 1341 | net = alloc_etherdev(sizeof(*dev)); |
| 1342 | if (!net) |
| 1343 | return ERR_PTR(-ENOMEM); |
| 1344 | |
| 1345 | dev = netdev_priv(net); |
| 1346 | spin_lock_init(&dev->lock); |
| 1347 | spin_lock_init(&dev->req_lock); |
| 1348 | INIT_WORK(&dev->work, eth_work); |
Matthew Moeller | 5df3222 | 2016-03-09 20:19:25 -0600 | [diff] [blame] | 1349 | INIT_WORK(&dev->rx_work, process_rx_w); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1350 | INIT_LIST_HEAD(&dev->tx_reqs); |
| 1351 | INIT_LIST_HEAD(&dev->rx_reqs); |
| 1352 | |
| 1353 | skb_queue_head_init(&dev->rx_frames); |
| 1354 | |
| 1355 | /* network device setup */ |
| 1356 | dev->net = net; |
| 1357 | dev->qmult = QMULT_DEFAULT; |
| 1358 | snprintf(net->name, sizeof(net->name), "%s%%d", netname); |
| 1359 | |
| 1360 | eth_random_addr(dev->dev_mac); |
| 1361 | pr_warn("using random %s ethernet address\n", "self"); |
| 1362 | eth_random_addr(dev->host_mac); |
| 1363 | pr_warn("using random %s ethernet address\n", "host"); |
| 1364 | |
| 1365 | net->netdev_ops = ð_netdev_ops; |
| 1366 | |
Wilfried Klaebe | 7ad24ea | 2014-05-11 00:12:32 +0000 | [diff] [blame] | 1367 | net->ethtool_ops = &ops; |
Hemant Kumar | fc49dbd | 2018-01-23 12:08:47 -0800 | [diff] [blame] | 1368 | |
| 1369 | /* set operation mode to eth by default */ |
| 1370 | set_bit(RMNET_MODE_LLP_ETH, &dev->flags); |
| 1371 | |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1372 | SET_NETDEV_DEVTYPE(net, &gadget_type); |
| 1373 | |
| 1374 | return net; |
| 1375 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1376 | EXPORT_SYMBOL_GPL(gether_setup_name_default); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1377 | |
| 1378 | int gether_register_netdev(struct net_device *net) |
| 1379 | { |
| 1380 | struct eth_dev *dev; |
| 1381 | struct usb_gadget *g; |
| 1382 | struct sockaddr sa; |
| 1383 | int status; |
| 1384 | |
| 1385 | if (!net->dev.parent) |
| 1386 | return -EINVAL; |
| 1387 | dev = netdev_priv(net); |
| 1388 | g = dev->gadget; |
| 1389 | status = register_netdev(net); |
| 1390 | if (status < 0) { |
| 1391 | dev_dbg(&g->dev, "register_netdev failed, %d\n", status); |
| 1392 | return status; |
| 1393 | } else { |
| 1394 | INFO(dev, "HOST MAC %pM\n", dev->host_mac); |
| 1395 | |
| 1396 | /* two kinds of host-initiated state changes: |
| 1397 | * - iff DATA transfer is active, carrier is "on" |
| 1398 | * - tx queueing enabled if open *and* carrier is "on" |
| 1399 | */ |
| 1400 | netif_carrier_off(net); |
| 1401 | } |
| 1402 | sa.sa_family = net->type; |
| 1403 | memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN); |
| 1404 | rtnl_lock(); |
| 1405 | status = dev_set_mac_address(net, &sa); |
| 1406 | rtnl_unlock(); |
| 1407 | if (status) |
| 1408 | pr_warn("cannot set self ethernet address: %d\n", status); |
| 1409 | else |
| 1410 | INFO(dev, "MAC %pM\n", dev->dev_mac); |
| 1411 | |
| 1412 | return status; |
| 1413 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1414 | EXPORT_SYMBOL_GPL(gether_register_netdev); |
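| | /* |
| |  * Sketch of the configfs-style bring-up this helper completes: the |
| |  * net_device from gether_setup_name_default() is first bound to a |
| |  * gadget (see gether_set_gadget() below), then registered: |
| |  * |
| |  *	gether_set_gadget(net, g); |
| |  *	status = gether_register_netdev(net); |
| |  *	if (status) |
| |  *		free_netdev(net); |
| |  */ |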
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1415 | |
| 1416 | void gether_set_gadget(struct net_device *net, struct usb_gadget *g) |
| 1417 | { |
| 1418 | struct eth_dev *dev; |
| 1419 | |
| 1420 | dev = netdev_priv(net); |
| 1421 | dev->gadget = g; |
| 1422 | SET_NETDEV_DEV(net, &g->dev); |
| 1423 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1424 | EXPORT_SYMBOL_GPL(gether_set_gadget); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1425 | |
| 1426 | int gether_set_dev_addr(struct net_device *net, const char *dev_addr) |
| 1427 | { |
| 1428 | struct eth_dev *dev; |
| 1429 | u8 new_addr[ETH_ALEN]; |
| 1430 | |
| 1431 | dev = netdev_priv(net); |
| 1432 | if (get_ether_addr(dev_addr, new_addr)) |
| 1433 | return -EINVAL; |
| 1434 | memcpy(dev->dev_mac, new_addr, ETH_ALEN); |
| 1435 | return 0; |
| 1436 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1437 | EXPORT_SYMBOL_GPL(gether_set_dev_addr); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1438 | |
| 1439 | int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len) |
| 1440 | { |
| 1441 | struct eth_dev *dev; |
| 1442 | |
| 1443 | dev = netdev_priv(net); |
| 1444 | return get_ether_addr_str(dev->dev_mac, dev_addr, len); |
| 1445 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1446 | EXPORT_SYMBOL_GPL(gether_get_dev_addr); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1447 | |
| 1448 | int gether_set_host_addr(struct net_device *net, const char *host_addr) |
| 1449 | { |
| 1450 | struct eth_dev *dev; |
| 1451 | u8 new_addr[ETH_ALEN]; |
| 1452 | |
| 1453 | dev = netdev_priv(net); |
| 1454 | if (get_ether_addr(host_addr, new_addr)) |
| 1455 | return -EINVAL; |
| 1456 | memcpy(dev->host_mac, new_addr, ETH_ALEN); |
| 1457 | return 0; |
| 1458 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1459 | EXPORT_SYMBOL_GPL(gether_set_host_addr); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1460 | |
| 1461 | int gether_get_host_addr(struct net_device *net, char *host_addr, int len) |
| 1462 | { |
| 1463 | struct eth_dev *dev; |
| 1464 | |
| 1465 | dev = netdev_priv(net); |
| 1466 | return get_ether_addr_str(dev->host_mac, host_addr, len); |
| 1467 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1468 | EXPORT_SYMBOL_GPL(gether_get_host_addr); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1469 | |
| 1470 | int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len) |
| 1471 | { |
| 1472 | struct eth_dev *dev; |
| 1473 | |
| 1474 | if (len < 13) |
| 1475 | return -EINVAL; |
| 1476 | |
| 1477 | dev = netdev_priv(net); |
| 1478 | snprintf(host_addr, len, "%pm", dev->host_mac); |
| 1479 | |
| 1480 | return strlen(host_addr); |
| 1481 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1482 | EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1483 | |
Andrzej Pietrasiewicz | bf4277c | 2013-05-28 09:15:45 +0200 | [diff] [blame] | 1484 | void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN]) |
| 1485 | { |
| 1486 | struct eth_dev *dev; |
| 1487 | |
| 1488 | dev = netdev_priv(net); |
| 1489 | memcpy(host_mac, dev->host_mac, ETH_ALEN); |
| 1490 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1491 | EXPORT_SYMBOL_GPL(gether_get_host_addr_u8); |
Andrzej Pietrasiewicz | bf4277c | 2013-05-28 09:15:45 +0200 | [diff] [blame] | 1492 | |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1493 | void gether_set_qmult(struct net_device *net, unsigned qmult) |
| 1494 | { |
| 1495 | struct eth_dev *dev; |
| 1496 | |
| 1497 | dev = netdev_priv(net); |
| 1498 | dev->qmult = qmult; |
| 1499 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1500 | EXPORT_SYMBOL_GPL(gether_set_qmult); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1501 | |
| 1502 | unsigned gether_get_qmult(struct net_device *net) |
| 1503 | { |
| 1504 | struct eth_dev *dev; |
| 1505 | |
| 1506 | dev = netdev_priv(net); |
| 1507 | return dev->qmult; |
| 1508 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1509 | EXPORT_SYMBOL_GPL(gether_get_qmult); |
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1510 | |
| 1511 | int gether_get_ifname(struct net_device *net, char *name, int len) |
| 1512 | { |
| 1513 | rtnl_lock(); |
| 1514 | strlcpy(name, netdev_name(net), len); |
| 1515 | rtnl_unlock(); |
| 1516 | return strlen(name); |
| 1517 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1518 | EXPORT_SYMBOL_GPL(gether_get_ifname); |
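| | /* |
| |  * Together, these accessors let a function instance stage link |
| |  * parameters before registration and query them afterwards; a sketch: |
| |  * |
| |  *	char ifname[IFNAMSIZ]; |
| |  * |
| |  *	gether_set_qmult(net, 5); |
| |  *	gether_set_dev_addr(net, "aa:bb:cc:dd:ee:01"); |
| |  *	gether_set_host_addr(net, "aa:bb:cc:dd:ee:02"); |
| |  *	... after gether_register_netdev() ... |
| |  *	gether_get_ifname(net, ifname, sizeof(ifname)); |
| |  */ |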
Andrzej Pietrasiewicz | bcd4a1c | 2013-05-23 09:22:05 +0200 | [diff] [blame] | 1519 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1520 | /** |
| 1521 | * gether_cleanup - remove Ethernet-over-USB device |
| 1522 | * Context: may sleep |
| 1523 | * |
| 1524 | * This is called to free all resources allocated by @gether_setup(). |
| 1525 | */ |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1526 | void gether_cleanup(struct eth_dev *dev) |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1527 | { |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1528 | if (!dev) |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1529 | return; |
| 1530 | |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame] | 1531 | uether_debugfs_exit(dev); |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1532 | unregister_netdev(dev->net); |
| 1533 | flush_work(&dev->work); |
| 1534 | free_netdev(dev->net); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1535 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1536 | EXPORT_SYMBOL_GPL(gether_cleanup); |
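| | /* |
| |  * Paired with the setup sketches above: once the function is unbound, |
| |  * gether_cleanup(edev) unregisters and frees the net_device. |
| |  */ |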
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1537 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1538 | /** |
| 1539 | * gether_connect - notify network layer that USB link is active |
| 1540 | * @link: the USB link, set up with endpoints, descriptors matching |
| 1541 | * current device speed, and any framing wrapper(s) set up. |
| 1542 | * Context: irqs blocked |
| 1543 | * |
| 1544 | * This is called to activate endpoints and let the network layer know |
| 1545 | * the connection is active ("carrier detect"). It may cause the I/O |
| 1546 | * queues to open and start letting network packets flow, but will in |
| 1547 | * any case activate the endpoints so that they respond properly to the |
| 1548 | * USB host. |
| 1549 | * |
| 1550 |  * Verify the returned net_device pointer using IS_ERR(). If it doesn't |
| 1551 |  * hold an error code (negative errno), the endpoints' driver_data |
| 1552 |  * values have been overwritten. |
| 1553 | */ |
| 1554 | struct net_device *gether_connect(struct gether *link) |
| 1555 | { |
Sebastian Andrzej Siewior | d6a0143 | 2012-12-23 21:10:12 +0100 | [diff] [blame] | 1556 | struct eth_dev *dev = link->ioport; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1557 | int result = 0; |
| 1558 | |
| 1559 | if (!dev) |
| 1560 | return ERR_PTR(-EINVAL); |
| 1561 | |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1562 | if (link->in_ep) { |
| 1563 | link->in_ep->driver_data = dev; |
| 1564 | result = usb_ep_enable(link->in_ep); |
| 1565 | if (result != 0) { |
| 1566 | DBG(dev, "enable %s --> %d\n", |
| 1567 | link->in_ep->name, result); |
| 1568 | goto fail0; |
| 1569 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1570 | } |
| 1571 | |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1572 | if (link->out_ep) { |
| 1573 | link->out_ep->driver_data = dev; |
| 1574 | result = usb_ep_enable(link->out_ep); |
| 1575 | if (result != 0) { |
| 1576 | DBG(dev, "enable %s --> %d\n", |
| 1577 | link->out_ep->name, result); |
| 1578 | goto fail1; |
| 1579 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1580 | } |
| 1581 | |
| 1582 | if (result == 0) |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 1583 | result = alloc_requests(dev, link, qlen(dev->gadget, |
| 1584 | dev->qmult)); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1585 | |
| 1586 | if (result == 0) { |
| 1587 | dev->zlp = link->is_zlp_ok; |
Yoshihiro Shimoda | 05f6b0f | 2016-08-22 17:48:26 +0900 | [diff] [blame] | 1588 | dev->no_skb_reserve = link->no_skb_reserve; |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 1589 | DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult)); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1590 | |
| 1591 | dev->header_len = link->header_len; |
| 1592 | dev->unwrap = link->unwrap; |
| 1593 | dev->wrap = link->wrap; |
xerox_lin | 87bebf8 | 2014-08-14 14:48:44 +0800 | [diff] [blame] | 1594 | dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer; |
xerox_lin | cdffcb8 | 2014-09-04 16:01:59 +0800 | [diff] [blame] | 1595 | dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1596 | |
| 1597 | spin_lock(&dev->lock); |
Badhri Jagan Sridharan | e791ad3 | 2014-09-18 10:46:08 -0700 | [diff] [blame] | 1598 | dev->tx_skb_hold_count = 0; |
| 1599 | dev->no_tx_req_used = 0; |
| 1600 | dev->tx_req_bufsize = 0; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1601 | dev->port_usb = link; |
David Brownell | 29bac7b | 2008-09-06 21:33:49 -0700 | [diff] [blame] | 1602 | if (netif_running(dev->net)) { |
| 1603 | if (link->open) |
| 1604 | link->open(link); |
| 1605 | } else { |
| 1606 | if (link->close) |
| 1607 | link->close(link); |
| 1608 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1609 | spin_unlock(&dev->lock); |
| 1610 | |
| 1611 | netif_carrier_on(dev->net); |
| 1612 | if (netif_running(dev->net)) |
| 1613 | eth_start(dev, GFP_ATOMIC); |
| 1614 | |
| 1615 | /* on error, disable any endpoints */ |
| 1616 | } else { |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1617 | if (link->out_ep) |
| 1618 | (void) usb_ep_disable(link->out_ep); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1619 | fail1: |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1620 | if (link->in_ep) |
| 1621 | (void) usb_ep_disable(link->in_ep); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1622 | } |
| 1623 | fail0: |
| 1624 | /* caller is responsible for cleanup on error */ |
| 1625 | if (result < 0) |
| 1626 | return ERR_PTR(result); |
| 1627 | return dev->net; |
| 1628 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1629 | EXPORT_SYMBOL_GPL(gether_connect); |
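| | /* |
| |  * Typical call site, sketched: a function driver's set_alt() points the |
| |  * struct gether at its endpoints and eth_dev, then connects (the field |
| |  * sources shown are illustrative): |
| |  * |
| |  *	link->in_ep = in_ep; |
| |  *	link->out_ep = out_ep; |
| |  *	link->ioport = edev; |
| |  *	net = gether_connect(link); |
| |  *	if (IS_ERR(net)) |
| |  *		return PTR_ERR(net); |
| |  */ |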
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1630 | |
| 1631 | /** |
| 1632 | * gether_disconnect - notify network layer that USB link is inactive |
| 1633 | * @link: the USB link, on which gether_connect() was called |
| 1634 | * Context: irqs blocked |
| 1635 | * |
| 1636 | * This is called to deactivate endpoints and let the network layer know |
| 1637 | * the connection went inactive ("no carrier"). |
| 1638 | * |
| 1639 | * On return, the state is as if gether_connect() had never been called. |
| 1640 | * The endpoints are inactive, and accordingly without active USB I/O. |
| 1641 | * Pointers to endpoint descriptors and endpoint private data are nulled. |
| 1642 | */ |
| 1643 | void gether_disconnect(struct gether *link) |
| 1644 | { |
| 1645 | struct eth_dev *dev = link->ioport; |
| 1646 | struct usb_request *req; |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 1647 | struct sk_buff *skb; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1648 | |
| 1649 | WARN_ON(!dev); |
| 1650 | if (!dev) |
| 1651 | return; |
| 1652 | |
| 1653 | DBG(dev, "%s\n", __func__); |
| 1654 | |
| 1655 | netif_stop_queue(dev->net); |
| 1656 | netif_carrier_off(dev->net); |
| 1657 | |
| 1658 | /* disable endpoints, forcing (synchronous) completion |
| 1659 | * of all pending i/o. then free the request objects |
| 1660 | * and forget about the endpoints. |
| 1661 | */ |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1662 | if (link->in_ep) { |
| 1663 | usb_ep_disable(link->in_ep); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1664 | spin_lock(&dev->req_lock); |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1665 | while (!list_empty(&dev->tx_reqs)) { |
| 1666 | req = container_of(dev->tx_reqs.next, |
| 1667 | struct usb_request, list); |
| 1668 | list_del(&req->list); |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1669 | |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1670 | spin_unlock(&dev->req_lock); |
Rajkumar Raghupathy | 7d4a6cb | 2013-05-23 11:37:41 +0530 | [diff] [blame] | 1671 | if (link->multi_pkt_xfer) { |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1672 | kfree(req->buf); |
Rajkumar Raghupathy | 7d4a6cb | 2013-05-23 11:37:41 +0530 | [diff] [blame] | 1673 | req->buf = NULL; |
| 1674 | } |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1675 | usb_ep_free_request(link->in_ep, req); |
| 1676 | spin_lock(&dev->req_lock); |
| 1677 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1678 | spin_unlock(&dev->req_lock); |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1679 | link->in_ep->desc = NULL; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1680 | } |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 1681 | |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1682 | if (link->out_ep) { |
| 1683 | usb_ep_disable(link->out_ep); |
| 1684 | spin_lock(&dev->req_lock); |
| 1685 | while (!list_empty(&dev->rx_reqs)) { |
| 1686 | req = container_of(dev->rx_reqs.next, |
| 1687 | struct usb_request, list); |
| 1688 | list_del(&req->list); |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 1689 | |
Hemant Kumar | 8a7c812 | 2018-01-05 11:47:46 -0800 | [diff] [blame] | 1690 | spin_unlock(&dev->req_lock); |
| 1691 | usb_ep_free_request(link->out_ep, req); |
| 1692 | spin_lock(&dev->req_lock); |
| 1693 | } |
| 1694 | spin_unlock(&dev->req_lock); |
| 1695 | |
| 1696 | spin_lock(&dev->rx_frames.lock); |
| 1697 | while ((skb = __skb_dequeue(&dev->rx_frames))) |
| 1698 | dev_kfree_skb_any(skb); |
| 1699 | spin_unlock(&dev->rx_frames.lock); |
| 1700 | |
| 1701 | link->out_ep->desc = NULL; |
| 1702 | } |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1703 | |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame] | 1704 | pr_debug("%s(): tx_throttle count = %lu\n", __func__, |
| 1705 | dev->tx_throttle); |
| 1706 | /* reset tx_throttle count */ |
| 1707 | dev->tx_throttle = 0; |
Manu Gautam | 354be03 | 2014-05-15 13:46:33 +0530 | [diff] [blame] | 1708 | dev->rx_throttle = 0; |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame] | 1709 | |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1710 | /* finish forgetting about this USB link episode */ |
| 1711 | dev->header_len = 0; |
| 1712 | dev->unwrap = NULL; |
| 1713 | dev->wrap = NULL; |
| 1714 | |
| 1715 | spin_lock(&dev->lock); |
| 1716 | dev->port_usb = NULL; |
David Brownell | 2b3d942 | 2008-06-19 18:19:28 -0700 | [diff] [blame] | 1717 | spin_unlock(&dev->lock); |
| 1718 | } |
Felipe Balbi | 0700faa | 2014-04-01 13:19:32 -0500 | [diff] [blame] | 1719 | EXPORT_SYMBOL_GPL(gether_disconnect); |
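| | /* |
| |  * The matching teardown, sketched: a function driver's disable() |
| |  * callback reverses gether_connect() with a single call (names below |
| |  * are assumptions for the sketch): |
| |  * |
| |  *	static void f_disable(struct usb_function *f) |
| |  *	{ |
| |  *		struct my_func *priv = func_to_my_func(f); |
| |  * |
| |  *		gether_disconnect(&priv->port); |
| |  *	} |
| |  */ |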
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 1720 | |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame] | 1721 | static int uether_stat_show(struct seq_file *s, void *unused) |
| 1722 | { |
| 1723 | struct eth_dev *dev = s->private; |
| 1724 | int ret = 0; |
| 1725 | |
Manu Gautam | 354be03 | 2014-05-15 13:46:33 +0530 | [diff] [blame] | 1726 | if (dev) { |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame] | 1727 | seq_printf(s, "tx_throttle = %lu\n", dev->tx_throttle); |
Vamsi Krishna | 41fc495 | 2014-06-01 20:20:15 -0700 | [diff] [blame] | 1728 | seq_printf(s, "tx_pkts_rcvd = %u\n", dev->tx_pkts_rcvd); |
Manu Gautam | 354be03 | 2014-05-15 13:46:33 +0530 | [diff] [blame] | 1729 | seq_printf(s, "rx_throttle = %lu\n", dev->rx_throttle); |
Tarun Gupta | 616d2ea | 2015-09-08 16:58:20 +0530 | [diff] [blame] | 1730 | seq_printf(s, "skb_expand_cnt = %lu\n", |
| 1731 | dev->skb_expand_cnt); |
Manu Gautam | 354be03 | 2014-05-15 13:46:33 +0530 | [diff] [blame] | 1732 | } |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame] | 1733 | return ret; |
| 1734 | } |
| 1735 | |
| 1736 | static int uether_open(struct inode *inode, struct file *file) |
| 1737 | { |
| 1738 | return single_open(file, uether_stat_show, inode->i_private); |
| 1739 | } |
| 1740 | |
| 1741 | static ssize_t uether_stat_reset(struct file *file, |
| 1742 | const char __user *ubuf, size_t count, loff_t *ppos) |
| 1743 | { |
| 1744 | struct seq_file *s = file->private_data; |
| 1745 | struct eth_dev *dev = s->private; |
| 1746 | unsigned long flags; |
| 1747 | |
| 1748 | spin_lock_irqsave(&dev->lock, flags); |
| 1749 | /* Reset tx_throttle */ |
| 1750 | dev->tx_throttle = 0; |
Manu Gautam | 354be03 | 2014-05-15 13:46:33 +0530 | [diff] [blame] | 1751 | dev->rx_throttle = 0; |
Tarun Gupta | 616d2ea | 2015-09-08 16:58:20 +0530 | [diff] [blame] | 1752 | dev->skb_expand_cnt = 0; |
Saket Saurabh | 6481d88 | 2013-09-27 15:52:36 +0530 | [diff] [blame] | 1753 | spin_unlock_irqrestore(&dev->lock, flags); |
| 1754 | return count; |
| 1755 | } |
| 1756 | |
| 1757 | static const struct file_operations uether_stats_ops = { |
| 1758 | .open = uether_open, |
| 1759 | .read = seq_read, |
| 1760 | .write = uether_stat_reset, |
| 1761 | }; |
| 1762 | |
| 1763 | static void uether_debugfs_init(struct eth_dev *dev) |
| 1764 | { |
| 1765 | struct dentry *uether_dent; |
| 1766 | struct dentry *uether_dfile; |
| 1767 | |
| 1768 | uether_dent = debugfs_create_dir("uether_rndis", NULL); |
| 1769 | if (IS_ERR_OR_NULL(uether_dent)) |
| 1770 | return; |
| 1771 | dev->uether_dent = uether_dent; |
| 1772 | |
| 1773 | uether_dfile = debugfs_create_file("status", 0644, |
| 1774 | uether_dent, dev, &uether_stats_ops); |
| 1775 | if (IS_ERR_OR_NULL(uether_dfile)) { |
| 1776 | debugfs_remove(uether_dent); |
| 1777 | dev->uether_dent = NULL; |
| | return; |
| | } |
| | dev->uether_dfile = uether_dfile; |
| 1778 | } |
| 1779 | |
| 1780 | static void uether_debugfs_exit(struct eth_dev *dev) |
| 1781 | { |
| 1782 | debugfs_remove(dev->uether_dfile); |
| 1783 | debugfs_remove(dev->uether_dent); |
| 1784 | dev->uether_dent = NULL; |
| 1785 | dev->uether_dfile = NULL; |
| 1786 | } |
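| | /* |
| |  * With debugfs mounted at /sys/kernel/debug (path assumed), the stats |
| |  * above can be read and reset from a shell: |
| |  * |
| |  *	cat /sys/kernel/debug/uether_rndis/status |
| |  *	echo 0 > /sys/kernel/debug/uether_rndis/status	(resets counters) |
| |  */ |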
| 1787 | |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 1788 | static int __init gether_init(void) |
| 1789 | { |
| 1790 | uether_wq = create_singlethread_workqueue("uether"); |
| 1791 | if (!uether_wq) { |
| 1792 | pr_err("%s: Unable to create workqueue: uether\n", __func__); |
| 1793 | return -ENOMEM; |
| 1794 | } |
| 1795 | return 0; |
| 1796 | } |
| 1797 | module_init(gether_init); |
| 1798 | |
| 1799 | static void __exit gether_exit(void) |
| 1800 | { |
| 1801 | destroy_workqueue(uether_wq); |
| 1802 | } |
| 1804 | module_exit(gether_exit); |
Andrzej Pietrasiewicz | f1a1823 | 2013-05-23 09:22:03 +0200 | [diff] [blame] | 1805 | MODULE_AUTHOR("David Brownell"); |
Badhri Jagan Sridharan | a096ba5 | 2014-09-24 18:58:23 -0700 | [diff] [blame] | 1806 | MODULE_DESCRIPTION("ethernet over USB driver"); |
| 1807 | MODULE_LICENSE("GPL v2"); |