/*
 * rionet - Ethernet driver over RapidIO messaging services
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>
#include <linux/rio_ids.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/reboot.h>

#define DRV_NAME	"rionet"
#define DRV_VERSION	"0.3"
#define DRV_AUTHOR	"Matt Porter <mporter@kernel.crashing.org>"
#define DRV_DESC	"Ethernet over RapidIO"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

#define RIONET_DEFAULT_MSGLEVEL \
			(NETIF_MSG_DRV | \
			 NETIF_MSG_LINK | \
			 NETIF_MSG_RX_ERR | \
			 NETIF_MSG_TX_ERR)

#define RIONET_DOORBELL_JOIN	0x1000
#define RIONET_DOORBELL_LEAVE	0x1001

#define RIONET_MAILBOX		0

#define RIONET_TX_RING_SIZE	CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE	CONFIG_RIONET_RX_SIZE
#define RIONET_MAX_NETS		8
#define RIONET_MSG_SIZE		RIO_MAX_MSG_SIZE
#define RIONET_MAX_MTU		(RIONET_MSG_SIZE - ETH_HLEN)

struct rionet_private {
	struct rio_mport *mport;
	struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
	struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
	int rx_slot;
	int tx_slot;
	int tx_cnt;
	int ack_slot;
	spinlock_t lock;
	spinlock_t tx_lock;
	u32 msg_enable;
	bool open;
};

struct rionet_peer {
	struct list_head node;
	struct rio_dev *rdev;
	struct resource *res;
};

struct rionet_net {
	struct net_device *ndev;
	struct list_head peers;
	spinlock_t lock;	/* net info access lock */
	struct rio_dev **active;
	int nact;		/* number of active peers */
};

static struct rionet_net nets[RIONET_MAX_NETS];

#define is_rionet_capable(src_ops, dst_ops)			\
			((src_ops & RIO_SRC_OPS_DATA_MSG) &&	\
			 (dst_ops & RIO_DST_OPS_DATA_MSG) &&	\
			 (src_ops & RIO_SRC_OPS_DOORBELL) &&	\
			 (dst_ops & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
	is_rionet_capable(dev->src_ops, dev->dst_ops)

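/*
 * rionet has no real hardware MAC: addresses are synthesized as
 * 00:01:00:01:<destid_msb>:<destid_lsb> (see rionet_setup_netdev()),
 * so the two macros below recognize a rionet peer and recover its
 * 16-bit RapidIO destID directly from the Ethernet destination address.
 */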
#define RIONET_MAC_MATCH(x)	(!memcmp((x), "\00\01\00\01", 4))
#define RIONET_GET_DESTID(x)	((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))

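/*
 * Drain received messages from the inbound mailbox into the RX ring's
 * preposted skbs and pass them up the stack. Returns the slot index at
 * which draining stopped so the caller can refill up to that point.
 */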
static int rionet_rx_clean(struct net_device *ndev)
{
	int i;
	int error = 0;
	struct rionet_private *rnet = netdev_priv(ndev);
	void *data;

	i = rnet->rx_slot;

	do {
		if (!rnet->rx_skb[i])
			continue;

		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
			break;

		rnet->rx_skb[i]->data = data;
		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
		rnet->rx_skb[i]->protocol =
		    eth_type_trans(rnet->rx_skb[i], ndev);
		error = netif_rx(rnet->rx_skb[i]);

		if (error == NET_RX_DROP) {
			ndev->stats.rx_dropped++;
		} else {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
		}

	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);

	return i;
}

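/*
 * Repost fresh receive buffers to the inbound mailbox, filling ring
 * slots from rx_slot up to (but not including) @end.
 */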
static void rionet_rx_fill(struct net_device *ndev, int end)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);

	i = rnet->rx_slot;
	do {
		rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);

		if (!rnet->rx_skb[i])
			break;

		rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
				   rnet->rx_skb[i]->data);
	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);

	rnet->rx_slot = i;
}

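/*
 * Hand one skb to the outbound mailbox for transmission to @rdev and
 * park it in the TX ring until the completion callback releases it.
 * Stops the queue once the ring is full.
 */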
static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
			       struct rio_dev *rdev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
	rnet->tx_skb[rnet->tx_slot] = skb;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
		netif_stop_queue(ndev);

	++rnet->tx_slot;
	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);

	if (netif_msg_tx_queued(rnet))
		printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
		       skb->len);

	return 0;
}

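/*
 * Transmit entry point. Multicast/broadcast frames are replicated to
 * every active peer (bumping the skb refcount for each extra copy);
 * unicast frames matching the rionet MAC scheme go to the single peer
 * whose destID is encoded in the destination address.
 */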
static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 destid;
	unsigned long flags;
	int add_num = 1;

	spin_lock_irqsave(&rnet->tx_lock, flags);

	if (is_multicast_ether_addr(eth->h_dest))
		add_num = nets[rnet->mport->id].nact;

	if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
		netif_stop_queue(ndev);
		spin_unlock_irqrestore(&rnet->tx_lock, flags);
		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
		       ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (is_multicast_ether_addr(eth->h_dest)) {
		int count = 0;

		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
		     i++)
			if (nets[rnet->mport->id].active[i]) {
				rionet_queue_tx_msg(skb, ndev,
					nets[rnet->mport->id].active[i]);
				if (count)
					atomic_inc(&skb->users);
				count++;
			}
	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
		destid = RIONET_GET_DESTID(eth->h_dest);
		if (nets[rnet->mport->id].active[destid])
			rionet_queue_tx_msg(skb, ndev,
					nets[rnet->mport->id].active[destid]);
		else {
			/*
			 * If the target device was removed from the list of
			 * active peers but we still have TX packets targeting
			 * it, just report the packet as sent (no actual
			 * transfer takes place).
			 */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
			dev_kfree_skb_any(skb);
		}
	}

	spin_unlock_irqrestore(&rnet->tx_lock, flags);

	return NETDEV_TX_OK;
}

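/*
 * Inbound doorbell handler. A JOIN from a peer that is not yet active
 * marks it active and is answered with a JOIN doorbell; a LEAVE clears
 * the sender from the active table. Anything else is logged and ignored.
 */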
static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
			       u16 info)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x\n",
		       DRV_NAME, sid, tid, info);
	if (info == RIONET_DOORBELL_JOIN) {
		if (!nets[netid].active[sid]) {
			spin_lock(&nets[netid].lock);
			list_for_each_entry(peer, &nets[netid].peers, node) {
				if (peer->rdev->destid == sid) {
					nets[netid].active[sid] = peer->rdev;
					nets[netid].nact++;
				}
			}
			spin_unlock(&nets[netid].lock);

			rio_mport_send_doorbell(mport, sid,
						RIONET_DOORBELL_JOIN);
		}
	} else if (info == RIONET_DOORBELL_LEAVE) {
		spin_lock(&nets[netid].lock);
		if (nets[netid].active[sid]) {
			nets[netid].active[sid] = NULL;
			nets[netid].nact--;
		}
		spin_unlock(&nets[netid].lock);
	} else {
		if (netif_msg_intr(rnet))
			printk(KERN_WARNING "%s: unhandled doorbell\n",
			       DRV_NAME);
	}
}

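/*
 * Inbound message interrupt: drain whatever has arrived and repost
 * receive buffers for the drained slots.
 */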
static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	int n;
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	spin_lock(&rnet->lock);
	if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
		rionet_rx_fill(ndev, n);
	spin_unlock(&rnet->lock);
}

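/*
 * Outbound message completion interrupt: free the skbs the hardware has
 * finished with (everything between ack_slot and @slot) and wake the
 * queue if ring space was reclaimed.
 */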
static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	spin_lock(&rnet->tx_lock);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO
		       "%s: outbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
		/* dma unmap single */
		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
		rnet->tx_skb[rnet->ack_slot] = NULL;
		++rnet->ack_slot;
		rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
		rnet->tx_cnt--;
	}

	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
		netif_wake_queue(ndev);

	spin_unlock(&rnet->tx_lock);
}

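/*
 * ndo_open: claim the JOIN/LEAVE doorbell range and the inbound and
 * outbound mailboxes, prime the RX ring, then doorbell every known
 * peer to announce this node's arrival on the network.
 */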
static int rionet_open(struct net_device *ndev)
{
	int i, rc = 0;
	struct rionet_peer *peer;
	struct rionet_private *rnet = netdev_priv(ndev);
	unsigned char netid = rnet->mport->id;
	unsigned long flags;

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: open\n", DRV_NAME);

	if ((rc = rio_request_inb_dbell(rnet->mport,
					(void *)ndev,
					RIONET_DOORBELL_JOIN,
					RIONET_DOORBELL_LEAVE,
					rionet_dbell_event)) < 0)
		goto out;

	if ((rc = rio_request_inb_mbox(rnet->mport,
				       (void *)ndev,
				       RIONET_MAILBOX,
				       RIONET_RX_RING_SIZE,
				       rionet_inb_msg_event)) < 0)
		goto out;

	if ((rc = rio_request_outb_mbox(rnet->mport,
					(void *)ndev,
					RIONET_MAILBOX,
					RIONET_TX_RING_SIZE,
					rionet_outb_msg_event)) < 0)
		goto out;

	/* Initialize inbound message ring */
	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		rnet->rx_skb[i] = NULL;
	rnet->rx_slot = 0;
	rionet_rx_fill(ndev, 0);

	rnet->tx_slot = 0;
	rnet->tx_cnt = 0;
	rnet->ack_slot = 0;

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		/* Send a join message */
		rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);
	rnet->open = true;

out:
	return rc;
}

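/*
 * ndo_close: announce departure with a LEAVE doorbell to every active
 * peer, free the RX buffers, and release the doorbell and mailbox
 * resources claimed in rionet_open().
 */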
static int rionet_close(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;
	unsigned long flags;
	int i;

	if (netif_msg_ifdown(rnet))
		printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	rnet->open = false;

	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		kfree_skb(rnet->rx_skb[i]);

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (nets[netid].active[peer->rdev->destid]) {
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
			nets[netid].active[peer->rdev->destid] = NULL;
		}
		if (peer->res)
			rio_release_outb_dbell(peer->rdev, peer->res);
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
			      RIONET_DOORBELL_LEAVE);
	rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
	rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);

	return 0;
}

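/*
 * subsys remove_dev callback: a rionet-capable RapidIO device is going
 * away. Drop it from the peer list, send a LEAVE doorbell if it is
 * still reachable, and release its outbound doorbell resource.
 */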
static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;
	struct rionet_peer *peer;
	int state, found = 0;
	unsigned long flags;

	if (!dev_rionet_capable(rdev))
		return;

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (peer->rdev == rdev) {
			list_del(&peer->node);
			if (nets[netid].active[rdev->destid]) {
				state = atomic_read(&rdev->state);
				if (state != RIO_DEVICE_GONE &&
				    state != RIO_DEVICE_INITIALIZING) {
					rio_send_doorbell(rdev,
							  RIONET_DOORBELL_LEAVE);
				}
				nets[netid].active[rdev->destid] = NULL;
				nets[netid].nact--;
			}
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	if (found) {
		if (peer->res)
			rio_release_outb_dbell(rdev, peer->res);
		kfree(peer);
	}
}

static void rionet_get_drvinfo(struct net_device *ndev,
			       struct ethtool_drvinfo *info)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "n/a", sizeof(info->fw_version));
	strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
}

static u32 rionet_get_msglevel(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	return rnet->msg_enable;
}

static void rionet_set_msglevel(struct net_device *ndev, u32 value)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rnet->msg_enable = value;
}

static int rionet_change_mtu(struct net_device *ndev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > RIONET_MAX_MTU)) {
		printk(KERN_ERR "%s: Invalid MTU size %d\n",
		       ndev->name, new_mtu);
		return -EINVAL;
	}
	ndev->mtu = new_mtu;
	return 0;
}

static const struct ethtool_ops rionet_ethtool_ops = {
	.get_drvinfo = rionet_get_drvinfo,
	.get_msglevel = rionet_get_msglevel,
	.set_msglevel = rionet_set_msglevel,
	.get_link = ethtool_op_get_link,
};

static const struct net_device_ops rionet_netdev_ops = {
	.ndo_open = rionet_open,
	.ndo_stop = rionet_close,
	.ndo_start_xmit = rionet_start_xmit,
	.ndo_change_mtu = rionet_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

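/*
 * Allocate the per-mport active-peer table, synthesize the MAC address
 * from the local destID (00:01:00:01:<id_msb>:<id_lsb>), and register
 * the net_device for this mport.
 */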
static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
{
	int rc = 0;
	struct rionet_private *rnet;
	u16 device_id;
	const size_t rionet_active_bytes = sizeof(void *) *
				RIO_MAX_ROUTE_ENTRIES(mport->sys_size);

	nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
						get_order(rionet_active_bytes));
	if (!nets[mport->id].active) {
		rc = -ENOMEM;
		goto out;
	}
	memset((void *)nets[mport->id].active, 0, rionet_active_bytes);

	/* Set up private area */
	rnet = netdev_priv(ndev);
	rnet->mport = mport;
	rnet->open = false;

	/* Set the default MAC address */
	device_id = rio_local_get_device_id(mport);
	ndev->dev_addr[0] = 0x00;
	ndev->dev_addr[1] = 0x01;
	ndev->dev_addr[2] = 0x00;
	ndev->dev_addr[3] = 0x01;
	ndev->dev_addr[4] = device_id >> 8;
	ndev->dev_addr[5] = device_id & 0xff;

	ndev->netdev_ops = &rionet_netdev_ops;
	ndev->mtu = RIONET_MAX_MTU;
	ndev->features = NETIF_F_LLTX;
	SET_NETDEV_DEV(ndev, &mport->dev);
	ndev->ethtool_ops = &rionet_ethtool_ops;

	spin_lock_init(&rnet->lock);
	spin_lock_init(&rnet->tx_lock);

	rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;

	rc = register_netdev(ndev);
	if (rc != 0) {
		free_pages((unsigned long)nets[mport->id].active,
			   get_order(rionet_active_bytes));
		goto out;
	}

	printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
	       ndev->name,
	       DRV_NAME,
	       DRV_DESC,
	       DRV_VERSION,
	       ndev->dev_addr,
	       mport->name);

out:
	return rc;
}

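/*
 * subsys add_dev callback, invoked for every RapidIO device found.
 * On the first device of a net this also verifies that the local port
 * is message/doorbell capable and creates the net_device; capable
 * remote devices are then added to the peer list and, if the interface
 * is already up, greeted with a JOIN doorbell.
 */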
static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
{
	int rc = -ENODEV;
	u32 lsrc_ops, ldst_ops;
	struct rionet_peer *peer;
	struct net_device *ndev = NULL;
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;

	if (netid >= RIONET_MAX_NETS)
		return rc;

	/*
	 * If first time through this net, make sure local device is rionet
	 * capable and setup netdev (this step will be skipped in later probes
	 * on the same net).
	 */
	if (!nets[netid].ndev) {
		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
					 &lsrc_ops);
		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
					 &ldst_ops);
		if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
			printk(KERN_ERR
			       "%s: local device %s is not network capable\n",
			       DRV_NAME, rdev->net->hport->name);
			goto out;
		}

		/* Allocate our net_device structure */
		ndev = alloc_etherdev(sizeof(struct rionet_private));
		if (ndev == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		rc = rionet_setup_netdev(rdev->net->hport, ndev);
		if (rc) {
			printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
			       DRV_NAME, rc);
			free_netdev(ndev);
			goto out;
		}

		INIT_LIST_HEAD(&nets[netid].peers);
		spin_lock_init(&nets[netid].lock);
		nets[netid].nact = 0;
		nets[netid].ndev = ndev;
	}

	/*
	 * If the remote device has mailbox/doorbell capabilities,
	 * add it to the peer list.
	 */
	if (dev_rionet_capable(rdev)) {
		struct rionet_private *rnet;
		unsigned long flags;

		rnet = netdev_priv(nets[netid].ndev);

		peer = kzalloc(sizeof(*peer), GFP_KERNEL);
		if (!peer) {
			rc = -ENOMEM;
			goto out;
		}
		peer->rdev = rdev;
		peer->res = rio_request_outb_dbell(peer->rdev,
						   RIONET_DOORBELL_JOIN,
						   RIONET_DOORBELL_LEAVE);
		if (!peer->res) {
			pr_err("%s: error requesting doorbells\n", DRV_NAME);
			kfree(peer);
			rc = -ENOMEM;
			goto out;
		}

		spin_lock_irqsave(&nets[netid].lock, flags);
		list_add_tail(&peer->node, &nets[netid].peers);
		spin_unlock_irqrestore(&nets[netid].lock, flags);
		pr_debug("%s: %s add peer %s\n",
			 DRV_NAME, __func__, rio_name(rdev));

		/* If netdev is already opened, send join request to new peer */
		if (rnet->open)
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}

	return 0;
out:
	return rc;
}

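/*
 * Reboot notifier: on shutdown, send a LEAVE doorbell to every peer
 * still marked active on every net so remote nodes stop transmitting
 * to this one.
 */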
static int rionet_shutdown(struct notifier_block *nb, unsigned long code,
			   void *unused)
{
	struct rionet_peer *peer;
	unsigned long flags;
	int i;

	pr_debug("%s: %s\n", DRV_NAME, __func__);

	for (i = 0; i < RIONET_MAX_NETS; i++) {
		if (!nets[i].ndev)
			continue;

		spin_lock_irqsave(&nets[i].lock, flags);
		list_for_each_entry(peer, &nets[i].peers, node) {
			if (nets[i].active[peer->rdev->destid]) {
				rio_send_doorbell(peer->rdev,
						  RIONET_DOORBELL_LEAVE);
				nets[i].active[peer->rdev->destid] = NULL;
			}
		}
		spin_unlock_irqrestore(&nets[i].lock, flags);
	}

	return NOTIFY_DONE;
}

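/*
 * Class interface callback for a disappearing local mport: tear down
 * the associated net_device and free the active-peer table. The WARNs
 * below flag the unexpected case of peers still being connected.
 */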
static void rionet_remove_mport(struct device *dev,
				struct class_interface *class_intf)
{
	struct rio_mport *mport = to_rio_mport(dev);
	struct net_device *ndev;
	int id = mport->id;

	pr_debug("%s %s\n", __func__, mport->name);

	WARN(nets[id].nact, "%s called when connected to %d peers\n",
	     __func__, nets[id].nact);
	WARN(!nets[id].ndev, "%s called for mport without NDEV\n",
	     __func__);

	if (nets[id].ndev) {
		ndev = nets[id].ndev;
		netif_stop_queue(ndev);
		unregister_netdev(ndev);

		free_pages((unsigned long)nets[id].active,
			   get_order(sizeof(void *) *
				     RIO_MAX_ROUTE_ENTRIES(mport->sys_size)));
		nets[id].active = NULL;
		free_netdev(ndev);
		nets[id].ndev = NULL;
	}
}

#ifdef MODULE
static struct rio_device_id rionet_id_table[] = {
	{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
	{ 0, }	/* terminate list */
};

MODULE_DEVICE_TABLE(rapidio, rionet_id_table);
#endif

static struct subsys_interface rionet_interface = {
	.name		= "rionet",
	.subsys		= &rio_bus_type,
	.add_dev	= rionet_add_dev,
	.remove_dev	= rionet_remove_dev,
};

static struct notifier_block rionet_notifier = {
	.notifier_call = rionet_shutdown,
};

/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class = &rio_mport_class,
	.add_dev = NULL,
	.remove_dev = rionet_remove_mport,
};

static int __init rionet_init(void)
{
	int ret;

	ret = register_reboot_notifier(&rionet_notifier);
	if (ret) {
		pr_err("%s: failed to register reboot notifier (err=%d)\n",
		       DRV_NAME, ret);
		return ret;
	}

	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		pr_err("%s: class_interface_register error: %d\n",
		       DRV_NAME, ret);
		return ret;
	}

	return subsys_interface_register(&rionet_interface);
}

static void __exit rionet_exit(void)
{
	unregister_reboot_notifier(&rionet_notifier);
	subsys_interface_unregister(&rionet_interface);
	class_interface_unregister(&rio_mport_interface);
}

late_initcall(rionet_init);
module_exit(rionet_exit);