/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include "fm10k.h"

/**
 * fm10k_request_glort_range - Request GLORTs for use in configuring rules
 * @interface: board private structure
 *
 * This function allocates a range of glorts for this interface to use.
 **/
static void fm10k_request_glort_range(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 mask = (~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT;

	/* establish GLORT base */
	interface->glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	interface->glort_count = 0;

	/* nothing we can do until mask is allocated */
	if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
		return;

	interface->glort_count = mask + 1;
}

/**
 * fm10k_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
int fm10k_open(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int err;

	/* allocate interrupt resources */
	err = fm10k_qv_request_irq(interface);
	if (err)
		goto err_req_irq;

	/* setup GLORT assignment for this port */
	fm10k_request_glort_range(interface);

	/* Notify the stack of the actual queue counts */
	err = netif_set_real_num_rx_queues(netdev,
					   interface->num_rx_queues);
	if (err)
		goto err_set_queues;

	fm10k_up(interface);

	return 0;

err_set_queues:
	fm10k_qv_free_irq(interface);
err_req_irq:
	return err;
}

/**
 * fm10k_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int fm10k_close(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	fm10k_down(interface);

	fm10k_qv_free_irq(interface);

	return 0;
}

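/**
 * fm10k_xmit_frame - Transmit entry point for the net_device
 * @skb: send buffer
 * @dev: network interface device structure
 *
 * The frame is currently freed and reported as sent via NETDEV_TX_OK so
 * that the stack does not requeue it.
 **/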
static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

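/**
 * fm10k_change_mtu - Change the Maximum Transfer Unit
 * @dev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, -EINVAL if the requested MTU is out of range.
 **/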
static int fm10k_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > FM10K_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	dev->mtu = new_mtu;

	return 0;
}

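/**
 * fm10k_uc_vlan_unsync - Sync a unicast address for the interface VLAN
 * @netdev: network interface device structure
 * @uc_addr: unicast address from the kernel address list
 *
 * Adds or drops @uc_addr for the VLAN currently stored in interface->vid;
 * the bit above the VLAN ID range in that field selects between add and
 * drop. Returns a non-zero value so the address itself stays in the
 * kernel's list.
 **/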
static int fm10k_uc_vlan_unsync(struct net_device *netdev,
				const unsigned char *uc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	int err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, vid, set, 0);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

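/**
 * fm10k_mc_vlan_unsync - Sync a multicast address for the interface VLAN
 * @netdev: network interface device structure
 * @mc_addr: multicast address from the kernel address list
 *
 * Multicast counterpart of fm10k_uc_vlan_unsync(); adds or drops @mc_addr
 * for the VLAN encoded in interface->vid and returns a non-zero value so
 * the address stays in the kernel's list.
 **/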
static int fm10k_mc_vlan_unsync(struct net_device *netdev,
				const unsigned char *mc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	int err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

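/**
 * fm10k_update_vid - Add or remove a VLAN from the switch tables
 * @netdev: network interface device structure
 * @vid: VLAN ID to add or remove
 * @set: true to add the VLAN, false to remove it
 *
 * Updates the active_vlans bitmask, programs the VLAN table and the base
 * MAC address filter, and resynchronizes the unicast and multicast address
 * lists for the VLAN. Returns 0 on success or a negative error code.
 **/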
static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	s32 err;

	/* updates do not apply to VLAN 0 */
	if (!vid)
		return 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	/* Verify we have permission to add VLANs */
	if (hw->mac.vlan_override)
		return -EACCES;

	/* if default VLAN is already present do nothing */
	if (vid == hw->mac.default_vid)
		return -EBUSY;

	/* update active_vlans bitmask */
	set_bit(vid, interface->active_vlans);
	if (!set)
		clear_bit(vid, interface->active_vlans);

	fm10k_mbx_lock(interface);

	/* only need to update the VLAN if not in promiscuous mode */
	if (!(netdev->flags & IFF_PROMISC)) {
		err = hw->mac.ops.update_vlan(hw, vid, 0, set);
		if (err)
			goto err_out;
	}

	/* update our base MAC address */
	err = hw->mac.ops.update_uc_addr(hw, interface->glort, hw->mac.addr,
					 vid, set, 0);
	if (err)
		goto err_out;

	/* set vid prior to syncing/unsyncing the VLAN */
	interface->vid = vid + (set ? VLAN_N_VID : 0);

	/* Update the unicast and multicast address list to add/drop VLAN */
	__dev_uc_unsync(netdev, fm10k_uc_vlan_unsync);
	__dev_mc_unsync(netdev, fm10k_mc_vlan_unsync);

err_out:
	/* always release the mailbox lock, even on failure */
	fm10k_mbx_unlock(interface);

	return err;
}

static int fm10k_vlan_rx_add_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, true);
}

static int fm10k_vlan_rx_kill_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, false);
}

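/**
 * fm10k_find_next_vlan - Find the next active VLAN after a given VLAN ID
 * @interface: board private structure
 * @vid: VLAN ID to start the search from
 *
 * Returns the next bit set in the active_vlans bitmask after @vid. The
 * search stops at the default VLAN if @vid is below it, otherwise at
 * VLAN_N_VID, so the returned value equals the limit when no further
 * VLAN is active.
 **/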
static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 default_vid = hw->mac.default_vid;
	u16 vid_limit = vid < default_vid ? default_vid : VLAN_N_VID;

	vid = find_next_bit(interface->active_vlans, vid_limit, ++vid);

	return vid;
}

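/**
 * fm10k_clear_unused_vlans - Drop switch VLAN entries not in active_vlans
 * @interface: board private structure
 *
 * Walks the gaps between active VLANs and issues ranged update_vlan
 * requests to clear the unused entries from the hardware VLAN table.
 **/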
static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u32 vid, prev_vid;

	/* loop through and find any gaps in the table */
	for (vid = 0, prev_vid = 0;
	     prev_vid < VLAN_N_VID;
	     prev_vid = vid + 1, vid = fm10k_find_next_vlan(interface, vid)) {
		if (prev_vid == vid)
			continue;

		/* send request to clear multiple bits at a time */
		prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT;
		hw->mac.ops.update_vlan(hw, prev_vid, 0, false);
	}
}

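/**
 * __fm10k_uc_sync - Add or remove a unicast address from the switch table
 * @dev: network interface device structure
 * @addr: unicast address to sync
 * @sync: true to add the address, false to remove it
 *
 * Walks the VLANs reported by fm10k_find_next_vlan() and programs @addr
 * for each of them. Returns 0 on success, -EADDRNOTAVAIL for an invalid
 * address, or a mailbox error code.
 **/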
static int __fm10k_uc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	/* update table with current entries */
	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = hw->mac.ops.update_uc_addr(hw, glort, addr,
						 vid, sync, 0);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_uc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, true);
}

static int fm10k_uc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, false);
}

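/**
 * fm10k_set_mac - Change the Ethernet address of the interface
 * @dev: network interface device structure
 * @p: pointer to a struct sockaddr holding the new address
 *
 * Validates the new address and, if the interface is up, swaps the old
 * and new unicast filters over the mailbox before recording the address
 * in both the net_device and the MAC data structure. Returns 0 on
 * success, -EADDRNOTAVAIL for an invalid address, or -EAGAIN on a
 * mailbox failure.
 **/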
static int fm10k_set_mac(struct net_device *dev, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	struct sockaddr *addr = p;
	s32 err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (dev->flags & IFF_UP) {
		/* setting MAC address requires mailbox */
		fm10k_mbx_lock(interface);

		err = fm10k_uc_sync(dev, addr->sa_data);
		if (!err)
			fm10k_uc_unsync(dev, hw->mac.addr);

		fm10k_mbx_unlock(interface);
	}

	if (!err) {
		ether_addr_copy(dev->dev_addr, addr->sa_data);
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	}

	/* if we had a mailbox error suggest trying again */
	return err ? -EAGAIN : 0;
}

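/**
 * __fm10k_mc_sync - Add or remove a multicast address from the switch table
 * @dev: network interface device structure
 * @addr: multicast address to sync
 * @sync: true to add the address, false to remove it
 *
 * Multicast counterpart of __fm10k_uc_sync(); walks the VLANs reported by
 * fm10k_find_next_vlan() and programs @addr for each of them.
 **/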
static int __fm10k_mc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_multicast_ether_addr(addr))
		return -EADDRNOTAVAIL;

	/* update table with current entries */
	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_mc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, true);
}

static int fm10k_mc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, false);
}

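/**
 * fm10k_set_rx_mode - Update promiscuous, multicast, and filtering state
 * @dev: network interface device structure
 *
 * Derives the xcast mode from the netdev flags, resynchronizes the unicast
 * and multicast address lists, and pushes the new mode and VLAN table
 * state to the switch over the mailbox when the mode has changed.
 **/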
static void fm10k_set_rx_mode(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	int xcast_mode;

	/* no need to update the hardware if we are not running */
	if (!(dev->flags & IFF_UP))
		return;

	/* determine new mode based on flags */
	xcast_mode = (dev->flags & IFF_PROMISC) ? FM10K_XCAST_MODE_PROMISC :
		     (dev->flags & IFF_ALLMULTI) ? FM10K_XCAST_MODE_ALLMULTI :
		     (dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
		     FM10K_XCAST_MODE_MULTI : FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(interface);

	/* synchronize all of the addresses */
	if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
		__dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
		if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
			__dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
	}

	/* if we aren't changing modes there is nothing to do */
	if (interface->xcast_mode != xcast_mode) {
		/* update VLAN table */
		if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
			hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, true);
		if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
			fm10k_clear_unused_vlans(interface);

		/* update xcast mode */
		hw->mac.ops.update_xcast_mode(hw, interface->glort, xcast_mode);

		/* record updated xcast mode state */
		interface->xcast_mode = xcast_mode;
	}

	fm10k_mbx_unlock(interface);
}

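/**
 * fm10k_restore_rx_state - Re-program the logical port filtering state
 * @interface: board private structure
 *
 * Re-enables the logical port and restores the xcast mode, the VLAN table,
 * the base MAC address filters, and the unicast and multicast address
 * lists from the driver's software state.
 **/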
void fm10k_restore_rx_state(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int xcast_mode;
	u16 vid, glort;

	/* record glort for this interface */
	glort = interface->glort;

	/* convert interface flags to xcast mode */
	if (netdev->flags & IFF_PROMISC)
		xcast_mode = FM10K_XCAST_MODE_PROMISC;
	else if (netdev->flags & IFF_ALLMULTI)
		xcast_mode = FM10K_XCAST_MODE_ALLMULTI;
	else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST))
		xcast_mode = FM10K_XCAST_MODE_MULTI;
	else
		xcast_mode = FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(interface);

	/* Enable logical port */
	hw->mac.ops.update_lport_state(hw, glort, interface->glort_count, true);

	/* update VLAN table */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0,
				xcast_mode == FM10K_XCAST_MODE_PROMISC);

	/* Add filter for VLAN 0 */
	hw->mac.ops.update_vlan(hw, 0, 0, true);

	/* update table with current entries */
	for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		hw->mac.ops.update_vlan(hw, vid, 0, true);
		hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr,
					   vid, true, 0);
	}

	/* synchronize all of the addresses */
	if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
		__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
		if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
			__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
	}

	/* update xcast mode */
	hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);

	fm10k_mbx_unlock(interface);

	/* record updated xcast mode state */
	interface->xcast_mode = xcast_mode;
}

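/**
 * fm10k_reset_rx_state - Drop the logical port and clear filtering state
 * @interface: board private structure
 *
 * Disables the logical port on the switch and resets the driver's xcast
 * mode and address sync state so everything can be restored cleanly by
 * fm10k_restore_rx_state().
 **/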
void fm10k_reset_rx_state(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;

	fm10k_mbx_lock(interface);

	/* clear the logical port state on lower device */
	hw->mac.ops.update_lport_state(hw, interface->glort,
				       interface->glort_count, false);

	fm10k_mbx_unlock(interface);

	/* reset flags to default state */
	interface->xcast_mode = FM10K_XCAST_MODE_NONE;

	/* clear the sync flag since the lport has been dropped */
	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);
}

/**
 * fm10k_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: storage space for 64bit statistics
 *
 * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
 * function replaces fm10k_get_stats for kernels which support it.
 */
static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *ring;
	unsigned int start, i;
	u64 bytes, packets;

	rcu_read_lock();

	for (i = 0; i < interface->num_rx_queues; i++) {
		ring = ACCESS_ONCE(interface->rx_ring[i]);

		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	for (i = 0; i < interface->num_tx_queues; i++) {
		/* read from the Tx ring, not the Rx ring, for Tx statistics */
		ring = ACCESS_ONCE(interface->tx_ring[i]);

		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}

	rcu_read_unlock();

	/* following stats updated by fm10k_service_task() */
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	return stats;
}

static const struct net_device_ops fm10k_netdev_ops = {
	.ndo_open		= fm10k_open,
	.ndo_stop		= fm10k_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= fm10k_xmit_frame,
	.ndo_set_mac_address	= fm10k_set_mac,
	.ndo_change_mtu		= fm10k_change_mtu,
	.ndo_vlan_rx_add_vid	= fm10k_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fm10k_vlan_rx_kill_vid,
	.ndo_set_rx_mode	= fm10k_set_rx_mode,
	.ndo_get_stats64	= fm10k_get_stats64,
};

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

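/**
 * fm10k_alloc_netdev - Allocate and configure a net_device for the interface
 *
 * Allocates a multi-queue Ethernet device, installs the fm10k netdev ops,
 * sets the default message level, and configures the default feature,
 * VLAN, and tunnel offload flags. Returns the net_device on success or
 * NULL on allocation failure.
 **/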
struct net_device *fm10k_alloc_netdev(void)
{
	struct fm10k_intfc *interface;
	struct net_device *dev;

	dev = alloc_etherdev_mq(sizeof(struct fm10k_intfc), MAX_QUEUES);
	if (!dev)
		return NULL;

	/* set net device and ethtool ops */
	dev->netdev_ops = &fm10k_netdev_ops;

	/* configure default debug level */
	interface = netdev_priv(dev);
	interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	/* configure default features */
	dev->features |= NETIF_F_SG;

	/* all features defined to this point should be changeable */
	dev->hw_features |= dev->features;

	/* configure VLAN features */
	dev->vlan_features |= dev->features;

	/* configure tunnel offloads */
	dev->hw_enc_features = NETIF_F_SG;

	/* we want to leave these both on as we cannot disable VLAN tag
	 * insertion or stripping on the hardware since it is contained
	 * in the FTAG and not in the frame itself.
	 */
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->priv_flags |= IFF_UNICAST_FLT;

	return dev;
}