blob: 4daf3d0926a82cfb52fd6cf774f0a467230c3246 [file] [log] [blame]
Jon Mason548c2372012-11-16 19:27:13 -07001/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2012 Intel Corporation. All rights reserved.
Allen Hubbee26a5842015-04-09 10:33:20 -04008 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
Jon Mason548c2372012-11-16 19:27:13 -07009 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * BSD LICENSE
15 *
16 * Copyright(c) 2012 Intel Corporation. All rights reserved.
Allen Hubbee26a5842015-04-09 10:33:20 -040017 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
Jon Mason548c2372012-11-16 19:27:13 -070018 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions
21 * are met:
22 *
23 * * Redistributions of source code must retain the above copyright
24 * notice, this list of conditions and the following disclaimer.
25 * * Redistributions in binary form must reproduce the above copy
26 * notice, this list of conditions and the following disclaimer in
27 * the documentation and/or other materials provided with the
28 * distribution.
29 * * Neither the name of Intel Corporation nor the names of its
30 * contributors may be used to endorse or promote products derived
31 * from this software without specific prior written permission.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
34 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
35 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
36 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
37 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
39 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
40 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
41 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
42 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
43 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44 *
Allen Hubbee26a5842015-04-09 10:33:20 -040045 * PCIe NTB Network Linux driver
Jon Mason548c2372012-11-16 19:27:13 -070046 *
47 * Contact Information:
48 * Jon Mason <jon.mason@intel.com>
49 */
50#include <linux/etherdevice.h>
51#include <linux/ethtool.h>
52#include <linux/module.h>
53#include <linux/pci.h>
Allen Hubbee26a5842015-04-09 10:33:20 -040054#include <linux/ntb.h>
Allen Hubbeec110bc2015-05-07 06:45:21 -040055#include <linux/ntb_transport.h>
Jon Mason548c2372012-11-16 19:27:13 -070056
/* Driver version reported via ethtool and MODULE_VERSION */
#define NTB_NETDEV_VER	"0.7"

MODULE_DESCRIPTION(KBUILD_MODNAME);
MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

/* Time in usecs for tx resource reaper */
static unsigned int tx_time = 1;

/* Number of descriptors to free before resuming tx */
static unsigned int tx_start = 10;

/* Number of descriptors still available before stop upper layer tx */
static unsigned int tx_stop = 5;
/*
 * Per-interface driver state; one instance per NTB transport queue pair
 * exposed as an Ethernet device.
 */
struct ntb_netdev {
	struct list_head list;		/* link in the module-global dev_list */
	struct pci_dev *pdev;		/* PCI device backing the NTB */
	struct net_device *ndev;	/* the registered network interface */
	struct ntb_transport_qp *qp;	/* NTB transport queue pair */
	struct timer_list tx_timer;	/* reaper timer polling for free tx entries */
};

/* Netdev watchdog timeout, in milliseconds */
#define	NTB_TX_TIMEOUT_MS	1000
/* Number of rx buffers posted to the transport on open */
#define	NTB_RXQ_SIZE		100

/* All ntb_netdev instances created by this module */
static LIST_HEAD(dev_list);
85
Allen Hubbee26a5842015-04-09 10:33:20 -040086static void ntb_netdev_event_handler(void *data, int link_is_up)
Jon Mason548c2372012-11-16 19:27:13 -070087{
88 struct net_device *ndev = data;
89 struct ntb_netdev *dev = netdev_priv(ndev);
90
Allen Hubbee26a5842015-04-09 10:33:20 -040091 netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up,
Jon Mason548c2372012-11-16 19:27:13 -070092 ntb_transport_link_query(dev->qp));
93
Allen Hubbee26a5842015-04-09 10:33:20 -040094 if (link_is_up) {
95 if (ntb_transport_link_query(dev->qp))
96 netif_carrier_on(ndev);
97 } else {
Jon Mason548c2372012-11-16 19:27:13 -070098 netif_carrier_off(ndev);
Jon Mason403c63c2013-07-29 16:31:18 -070099 }
Jon Mason548c2372012-11-16 19:27:13 -0700100}
101
102static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
103 void *data, int len)
104{
105 struct net_device *ndev = qp_data;
106 struct sk_buff *skb;
107 int rc;
108
109 skb = data;
110 if (!skb)
111 return;
112
113 netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
114
Allen Hubbeda2e5ae2015-07-13 08:07:08 -0400115 if (len < 0) {
116 ndev->stats.rx_errors++;
117 ndev->stats.rx_length_errors++;
118 goto enqueue_again;
119 }
120
Jon Mason548c2372012-11-16 19:27:13 -0700121 skb_put(skb, len);
122 skb->protocol = eth_type_trans(skb, ndev);
123 skb->ip_summed = CHECKSUM_NONE;
124
125 if (netif_rx(skb) == NET_RX_DROP) {
126 ndev->stats.rx_errors++;
127 ndev->stats.rx_dropped++;
128 } else {
129 ndev->stats.rx_packets++;
130 ndev->stats.rx_bytes += len;
131 }
132
133 skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
134 if (!skb) {
135 ndev->stats.rx_errors++;
136 ndev->stats.rx_frame_errors++;
137 return;
138 }
139
Allen Hubbeda2e5ae2015-07-13 08:07:08 -0400140enqueue_again:
Jon Mason548c2372012-11-16 19:27:13 -0700141 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
142 if (rc) {
Jon Mason765ccc72013-01-19 02:02:31 -0700143 dev_kfree_skb(skb);
Jon Mason548c2372012-11-16 19:27:13 -0700144 ndev->stats.rx_errors++;
145 ndev->stats.rx_fifo_errors++;
146 }
147}
148
/*
 * Slow path of the tx flow-control: stop the queue, then re-check the free
 * descriptor count. If entries freed up in the meantime (racing with the tx
 * completion handler), restart the queue immediately; otherwise arm the
 * reaper timer to poll again and report -EBUSY.
 *
 * The netif_stop_queue() / smp_mb() / recheck sequence pairs with the
 * smp_mb() + netif_queue_stopped() check in ntb_netdev_tx_handler().
 */
static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
				      struct ntb_transport_qp *qp, int size)
{
	struct ntb_netdev *dev = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/* Make sure to see the latest value of ntb_transport_tx_free_entry()
	 * since the queue was last started.
	 */
	smp_mb();

	if (likely(ntb_transport_tx_free_entry(qp) < size)) {
		/* tx_time is in usecs */
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
		return -EBUSY;
	}

	netif_start_queue(netdev);
	return 0;
}
168
/*
 * Fast path of the tx flow-control: do nothing when the queue is already
 * stopped or when at least @size descriptors remain free; otherwise fall
 * through to the stop-and-recheck slow path.
 */
static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
				    struct ntb_transport_qp *qp, int size)
{
	if (netif_queue_stopped(ndev))
		return 0;

	if (ntb_transport_tx_free_entry(qp) >= size)
		return 0;

	return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
}
178
/*
 * Transport tx-completion callback: account the result, free the skb, and
 * wake the tx queue once enough descriptors (tx_start) are free again.
 *
 * NOTE(review): netdev_priv(ndev) is evaluated before the !ndev check below;
 * if ndev could ever be NULL here this ordering looks unsafe — confirm the
 * transport's qp_data contract.
 */
static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	struct ntb_netdev *dev = netdev_priv(ndev);

	skb = data;
	if (!skb || !ndev)
		return;

	/* len > 0 indicates the transport transmitted the frame */
	if (len > 0) {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
	} else {
		ndev->stats.tx_errors++;
		ndev->stats.tx_aborted_errors++;
	}

	dev_kfree_skb(skb);

	if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
209
210static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
211 struct net_device *ndev)
212{
213 struct ntb_netdev *dev = netdev_priv(ndev);
214 int rc;
215
Dave Jiange74bfee2015-07-13 08:07:17 -0400216 ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);
217
Jon Mason548c2372012-11-16 19:27:13 -0700218 rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
219 if (rc)
220 goto err;
221
Dave Jiange74bfee2015-07-13 08:07:17 -0400222 /* check for next submit */
223 ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);
224
Jon Mason548c2372012-11-16 19:27:13 -0700225 return NETDEV_TX_OK;
226
227err:
228 ndev->stats.tx_dropped++;
229 ndev->stats.tx_errors++;
Jon Mason548c2372012-11-16 19:27:13 -0700230 return NETDEV_TX_BUSY;
231}
232
Dave Jiange74bfee2015-07-13 08:07:17 -0400233static void ntb_netdev_tx_timer(unsigned long data)
234{
235 struct net_device *ndev = (struct net_device *)data;
236 struct ntb_netdev *dev = netdev_priv(ndev);
237
238 if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
239 mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time));
240 } else {
241 /* Make sure anybody stopping the queue after this sees the new
242 * value of ntb_transport_tx_free_entry()
243 */
244 smp_mb();
245 if (netif_queue_stopped(ndev))
246 netif_wake_queue(ndev);
247 }
248}
249
/*
 * ndo_open: post NTB_RXQ_SIZE empty rx buffers to the transport, set up the
 * tx reaper timer, bring the transport link up and start the tx queue.
 * Returns 0 on success or a negative errno; on failure all rx buffers that
 * were posted are drained and freed.
 */
static int ntb_netdev_open(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int rc, i, len;

	/* Add some empty rx bufs */
	for (i = 0; i < NTB_RXQ_SIZE; i++) {
		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
		if (!skb) {
			rc = -ENOMEM;
			goto err;
		}

		rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
					      ndev->mtu + ETH_HLEN);
		if (rc) {
			/* skb was not accepted by the transport; free it here */
			dev_kfree_skb(skb);
			goto err;
		}
	}

	/* Reaper timer used by the tx path; armed in __ntb_netdev_maybe_stop_tx */
	setup_timer(&dev->tx_timer, ntb_netdev_tx_timer, (unsigned long)ndev);

	/* Carrier stays off until the link event handler raises it */
	netif_carrier_off(ndev);
	ntb_transport_link_up(dev->qp);
	netif_start_queue(ndev);

	return 0;

err:
	/* Drain and free whatever rx buffers were already posted */
	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);
	return rc;
}
285
/*
 * ndo_stop: take the transport link down, drain and free all posted rx
 * buffers, then synchronously stop the tx reaper timer. The link is dropped
 * first so no new completions arrive while the rx queue is drained.
 */
static int ntb_netdev_close(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len;

	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	/* Wait for a concurrently running timer callback to finish */
	del_timer_sync(&dev->tx_timer);

	return 0;
}
301
/*
 * ndo_change_mtu: validate the new MTU against the transport's maximum
 * payload. If the device is running and the MTU grows, the posted rx
 * buffers are too small for the new frame size, so the link is taken down,
 * all rx buffers are drained, and the same number are re-posted at the new
 * size before bringing the link back up. Shrinking keeps the existing
 * (larger) buffers. If re-posting fails the device is left down and
 * inoperable, as logged.
 */
static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len, rc;

	/* Frame (MTU + Ethernet header) must fit in a transport entry */
	if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
		return -EINVAL;

	/* No buffers posted while down: just record the new MTU */
	if (!netif_running(ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	/* Bring down the link and dispose of posted rx entries */
	ntb_transport_link_down(dev->qp);

	if (ndev->mtu < new_mtu) {
		int i;

		/* Count and free the too-small buffers... */
		for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
			dev_kfree_skb(skb);

		/* ...then re-post the same count at the new size */
		for (; i; i--) {
			skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
			if (!skb) {
				rc = -ENOMEM;
				goto err;
			}

			rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
						      new_mtu + ETH_HLEN);
			if (rc) {
				dev_kfree_skb(skb);
				goto err;
			}
		}
	}

	ndev->mtu = new_mtu;

	ntb_transport_link_up(dev->qp);

	return 0;

err:
	/* Partial re-post failed: leave the link down and drain everything */
	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	netdev_err(ndev, "Error changing MTU, device inoperable\n");
	return rc;
}
356
/* net_device callbacks for the NTB virtual Ethernet interface */
static const struct net_device_ops ntb_netdev_ops = {
	.ndo_open = ntb_netdev_open,
	.ndo_stop = ntb_netdev_close,
	.ndo_start_xmit = ntb_netdev_start_xmit,
	.ndo_change_mtu = ntb_netdev_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
};
364
365static void ntb_get_drvinfo(struct net_device *ndev,
366 struct ethtool_drvinfo *info)
367{
368 struct ntb_netdev *dev = netdev_priv(ndev);
369
370 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
371 strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
372 strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
373}
374
Philippe Reynesa062d192017-03-09 23:10:13 +0100375static int ntb_get_link_ksettings(struct net_device *dev,
376 struct ethtool_link_ksettings *cmd)
Jon Mason548c2372012-11-16 19:27:13 -0700377{
Philippe Reynesa062d192017-03-09 23:10:13 +0100378 ethtool_link_ksettings_zero_link_mode(cmd, supported);
379 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
380 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
381 ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);
382
383 cmd->base.speed = SPEED_UNKNOWN;
384 cmd->base.duplex = DUPLEX_FULL;
385 cmd->base.port = PORT_OTHER;
386 cmd->base.phy_address = 0;
387 cmd->base.autoneg = AUTONEG_ENABLE;
Jon Mason548c2372012-11-16 19:27:13 -0700388
389 return 0;
390}
391
/* ethtool callbacks; link state comes from the generic carrier helper */
static const struct ethtool_ops ntb_ethtool_ops = {
	.get_drvinfo = ntb_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = ntb_get_link_ksettings,
};
397
/* Callbacks registered with the NTB transport queue pair */
static const struct ntb_queue_handlers ntb_netdev_handlers = {
	.tx_handler = ntb_netdev_tx_handler,
	.rx_handler = ntb_netdev_rx_handler,
	.event_handler = ntb_netdev_event_handler,
};
403
/*
 * Transport client probe: allocate an Ethernet device on top of the NTB
 * transport, create a transport queue pair for it, assign a random MAC,
 * register the netdev and add it to the module-global dev_list.
 * Returns 0 on success or a negative errno; resources are unwound in
 * reverse order on failure.
 */
static int ntb_netdev_probe(struct device *client_dev)
{
	struct ntb_dev *ntb;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct ntb_netdev *dev;
	int rc;

	/* The client device's parent is the NTB hardware device */
	ntb = dev_ntb(client_dev->parent);
	pdev = ntb->pdev;
	if (!pdev)
		return -ENODEV;

	ndev = alloc_etherdev(sizeof(*dev));
	if (!ndev)
		return -ENOMEM;

	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->pdev = pdev;
	ndev->features = NETIF_F_HIGHDMA;

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	ndev->hw_features = ndev->features;
	ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);

	/* No real hardware address: generate a random one */
	random_ether_addr(ndev->perm_addr);
	memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);

	ndev->netdev_ops = &ntb_netdev_ops;
	ndev->ethtool_ops = &ntb_ethtool_ops;

	ndev->min_mtu = 0;
	ndev->max_mtu = ETH_MAX_MTU;

	dev->qp = ntb_transport_create_queue(ndev, client_dev,
					     &ntb_netdev_handlers);
	if (!dev->qp) {
		rc = -EIO;
		goto err;
	}

	/* Default MTU: largest frame the transport can carry */
	ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;

	rc = register_netdev(ndev);
	if (rc)
		goto err1;

	list_add(&dev->list, &dev_list);
	dev_info(&pdev->dev, "%s created\n", ndev->name);
	return 0;

err1:
	ntb_transport_free_queue(dev->qp);
err:
	free_netdev(ndev);
	return rc;
}
463
/*
 * Transport client remove: find the ntb_netdev instance belonging to this
 * client device and tear it down (unregister netdev, free the queue pair,
 * free the netdev).
 *
 * NOTE(review): the lookup matches on the backing pci_dev; if more than one
 * netdev were ever created on the same PCI device, this would remove only
 * the first match — confirm the one-qp-per-pdev assumption.
 */
static void ntb_netdev_remove(struct device *client_dev)
{
	struct ntb_dev *ntb;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct ntb_netdev *dev;
	bool found = false;

	ntb = dev_ntb(client_dev->parent);
	pdev = ntb->pdev;

	list_for_each_entry(dev, &dev_list, list) {
		if (dev->pdev == pdev) {
			found = true;
			break;
		}
	}
	if (!found)
		return;

	list_del(&dev->list);

	ndev = dev->ndev;

	unregister_netdev(ndev);
	ntb_transport_free_queue(dev->qp);
	free_netdev(ndev);
}
492
/* NTB transport client registration: probe/remove per client device */
static struct ntb_transport_client ntb_netdev_client = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.probe = ntb_netdev_probe,
	.remove = ntb_netdev_remove,
};
499
500static int __init ntb_netdev_init_module(void)
501{
502 int rc;
503
Allen Hubbee26a5842015-04-09 10:33:20 -0400504 rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
Jon Mason548c2372012-11-16 19:27:13 -0700505 if (rc)
506 return rc;
Allen Hubbeec110bc2015-05-07 06:45:21 -0400507 return ntb_transport_register_client(&ntb_netdev_client);
Jon Mason548c2372012-11-16 19:27:13 -0700508}
509module_init(ntb_netdev_init_module);
510
/*
 * Module exit: unregister in the reverse order of ntb_netdev_init_module —
 * the transport client first, then the client device name.
 */
static void __exit ntb_netdev_exit_module(void)
{
	ntb_transport_unregister_client(&ntb_netdev_client);
	ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);