blob: db3e36401712a42ec30a70b52a93f60ecf760743 [file] [log] [blame]
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001/*
2 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
3 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/interrupt.h>
16#include <linux/sched.h>
17#include <linux/wait.h>
18#include <linux/spinlock.h>
Scott Feldman9f6bbf72014-11-28 14:34:27 +010019#include <linux/hashtable.h>
Jiri Pirko4b8ac962014-11-28 14:34:26 +010020#include <linux/crc32.h>
21#include <linux/sort.h>
22#include <linux/random.h>
23#include <linux/netdevice.h>
24#include <linux/inetdevice.h>
25#include <linux/skbuff.h>
26#include <linux/socket.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/if_ether.h>
30#include <linux/if_vlan.h>
Scott Feldman6c707942014-11-28 14:34:28 +010031#include <linux/if_bridge.h>
Scott Feldman9f6bbf72014-11-28 14:34:27 +010032#include <linux/bitops.h>
Jiri Pirko4b8ac962014-11-28 14:34:26 +010033#include <net/switchdev.h>
34#include <net/rtnetlink.h>
35#include <asm-generic/io-64-nonatomic-lo-hi.h>
36#include <generated/utsrelease.h>
37
38#include "rocker.h"
39
40static const char rocker_driver_name[] = "rocker";
41
42static const struct pci_device_id rocker_pci_id_table[] = {
43 {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
44 {0, }
45};
46
Scott Feldman9f6bbf72014-11-28 14:34:27 +010047struct rocker_flow_tbl_key {
48 u32 priority;
49 enum rocker_of_dpa_table_id tbl_id;
50 union {
51 struct {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -080052 u32 in_pport;
53 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +010054 enum rocker_of_dpa_table_id goto_tbl;
55 } ig_port;
56 struct {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -080057 u32 in_pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +010058 __be16 vlan_id;
59 __be16 vlan_id_mask;
60 enum rocker_of_dpa_table_id goto_tbl;
61 bool untagged;
62 __be16 new_vlan_id;
63 } vlan;
64 struct {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -080065 u32 in_pport;
66 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +010067 __be16 eth_type;
68 u8 eth_dst[ETH_ALEN];
69 u8 eth_dst_mask[ETH_ALEN];
70 __be16 vlan_id;
71 __be16 vlan_id_mask;
72 enum rocker_of_dpa_table_id goto_tbl;
73 bool copy_to_cpu;
74 } term_mac;
75 struct {
76 __be16 eth_type;
77 __be32 dst4;
78 __be32 dst4_mask;
79 enum rocker_of_dpa_table_id goto_tbl;
80 u32 group_id;
81 } ucast_routing;
82 struct {
83 u8 eth_dst[ETH_ALEN];
84 u8 eth_dst_mask[ETH_ALEN];
85 int has_eth_dst;
86 int has_eth_dst_mask;
87 __be16 vlan_id;
88 u32 tunnel_id;
89 enum rocker_of_dpa_table_id goto_tbl;
90 u32 group_id;
91 bool copy_to_cpu;
92 } bridge;
93 struct {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -080094 u32 in_pport;
95 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +010096 u8 eth_src[ETH_ALEN];
97 u8 eth_src_mask[ETH_ALEN];
98 u8 eth_dst[ETH_ALEN];
99 u8 eth_dst_mask[ETH_ALEN];
100 __be16 eth_type;
101 __be16 vlan_id;
102 __be16 vlan_id_mask;
103 u8 ip_proto;
104 u8 ip_proto_mask;
105 u8 ip_tos;
106 u8 ip_tos_mask;
107 u32 group_id;
108 } acl;
109 };
110};
111
112struct rocker_flow_tbl_entry {
113 struct hlist_node entry;
114 u32 ref_count;
115 u64 cookie;
116 struct rocker_flow_tbl_key key;
117 u32 key_crc32; /* key */
118};
119
120struct rocker_group_tbl_entry {
121 struct hlist_node entry;
122 u32 cmd;
123 u32 group_id; /* key */
124 u16 group_count;
125 u32 *group_ids;
126 union {
127 struct {
128 u8 pop_vlan;
129 } l2_interface;
130 struct {
131 u8 eth_src[ETH_ALEN];
132 u8 eth_dst[ETH_ALEN];
133 __be16 vlan_id;
134 u32 group_id;
135 } l2_rewrite;
136 struct {
137 u8 eth_src[ETH_ALEN];
138 u8 eth_dst[ETH_ALEN];
139 __be16 vlan_id;
140 bool ttl_check;
141 u32 group_id;
142 } l3_unicast;
143 };
144};
145
146struct rocker_fdb_tbl_entry {
147 struct hlist_node entry;
148 u32 key_crc32; /* key */
149 bool learned;
150 struct rocker_fdb_tbl_key {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -0800151 u32 pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100152 u8 addr[ETH_ALEN];
153 __be16 vlan_id;
154 } key;
155};
156
157struct rocker_internal_vlan_tbl_entry {
158 struct hlist_node entry;
159 int ifindex; /* key */
160 u32 ref_count;
161 __be16 vlan_id;
162};
163
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100164struct rocker_desc_info {
165 char *data; /* mapped */
166 size_t data_size;
167 size_t tlv_size;
168 struct rocker_desc *desc;
169 DEFINE_DMA_UNMAP_ADDR(mapaddr);
170};
171
172struct rocker_dma_ring_info {
173 size_t size;
174 u32 head;
175 u32 tail;
176 struct rocker_desc *desc; /* mapped */
177 dma_addr_t mapaddr;
178 struct rocker_desc_info *desc_info;
179 unsigned int type;
180};
181
182struct rocker;
183
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100184enum {
185 ROCKER_CTRL_LINK_LOCAL_MCAST,
186 ROCKER_CTRL_LOCAL_ARP,
187 ROCKER_CTRL_IPV4_MCAST,
188 ROCKER_CTRL_IPV6_MCAST,
189 ROCKER_CTRL_DFLT_BRIDGING,
190 ROCKER_CTRL_MAX,
191};
192
193#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00
194#define ROCKER_N_INTERNAL_VLANS 255
195#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
196#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
197
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100198struct rocker_port {
199 struct net_device *dev;
Scott Feldman6c707942014-11-28 14:34:28 +0100200 struct net_device *bridge_dev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100201 struct rocker *rocker;
202 unsigned int port_number;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -0800203 u32 pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100204 __be16 internal_vlan_id;
Scott Feldman6c707942014-11-28 14:34:28 +0100205 int stp_state;
Scott Feldman5111f802014-11-28 14:34:30 +0100206 u32 brport_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100207 bool ctrls[ROCKER_CTRL_MAX];
208 unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100209 struct napi_struct napi_tx;
210 struct napi_struct napi_rx;
211 struct rocker_dma_ring_info tx_ring;
212 struct rocker_dma_ring_info rx_ring;
213};
214
215struct rocker {
216 struct pci_dev *pdev;
217 u8 __iomem *hw_addr;
218 struct msix_entry *msix_entries;
219 unsigned int port_count;
220 struct rocker_port **ports;
221 struct {
222 u64 id;
223 } hw;
224 spinlock_t cmd_ring_lock;
225 struct rocker_dma_ring_info cmd_ring;
226 struct rocker_dma_ring_info event_ring;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100227 DECLARE_HASHTABLE(flow_tbl, 16);
228 spinlock_t flow_tbl_lock;
229 u64 flow_tbl_next_cookie;
230 DECLARE_HASHTABLE(group_tbl, 16);
231 spinlock_t group_tbl_lock;
232 DECLARE_HASHTABLE(fdb_tbl, 16);
233 spinlock_t fdb_tbl_lock;
234 unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
235 DECLARE_HASHTABLE(internal_vlan_tbl, 8);
236 spinlock_t internal_vlan_tbl_lock;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100237};
238
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100239static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
240static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
241static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
242static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
243static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
244static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
245static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
246static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
247static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
248
249/* Rocker priority levels for flow table entries. Higher
250 * priority match takes precedence over lower priority match.
251 */
252
253enum {
254 ROCKER_PRIORITY_UNKNOWN = 0,
255 ROCKER_PRIORITY_IG_PORT = 1,
256 ROCKER_PRIORITY_VLAN = 1,
257 ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
258 ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
259 ROCKER_PRIORITY_UNICAST_ROUTING = 1,
260 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
261 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
262 ROCKER_PRIORITY_BRIDGING_VLAN = 3,
263 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
264 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
265 ROCKER_PRIORITY_BRIDGING_TENANT = 3,
266 ROCKER_PRIORITY_ACL_CTRL = 3,
267 ROCKER_PRIORITY_ACL_NORMAL = 2,
268 ROCKER_PRIORITY_ACL_DFLT = 1,
269};
270
271static bool rocker_vlan_id_is_internal(__be16 vlan_id)
272{
273 u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
274 u16 end = 0xffe;
275 u16 _vlan_id = ntohs(vlan_id);
276
277 return (_vlan_id >= start && _vlan_id <= end);
278}
279
280static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
281 u16 vid, bool *pop_vlan)
282{
283 __be16 vlan_id;
284
285 if (pop_vlan)
286 *pop_vlan = false;
287 vlan_id = htons(vid);
288 if (!vlan_id) {
289 vlan_id = rocker_port->internal_vlan_id;
290 if (pop_vlan)
291 *pop_vlan = true;
292 }
293
294 return vlan_id;
295}
296
Scott Feldman6c707942014-11-28 14:34:28 +0100297static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
298 __be16 vlan_id)
299{
300 if (rocker_vlan_id_is_internal(vlan_id))
301 return 0;
302
303 return ntohs(vlan_id);
304}
305
306static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
307{
308 return !!rocker_port->bridge_dev;
309}
310
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100311struct rocker_wait {
312 wait_queue_head_t wait;
313 bool done;
314 bool nowait;
315};
316
317static void rocker_wait_reset(struct rocker_wait *wait)
318{
319 wait->done = false;
320 wait->nowait = false;
321}
322
323static void rocker_wait_init(struct rocker_wait *wait)
324{
325 init_waitqueue_head(&wait->wait);
326 rocker_wait_reset(wait);
327}
328
329static struct rocker_wait *rocker_wait_create(gfp_t gfp)
330{
331 struct rocker_wait *wait;
332
333 wait = kmalloc(sizeof(*wait), gfp);
334 if (!wait)
335 return NULL;
336 rocker_wait_init(wait);
337 return wait;
338}
339
340static void rocker_wait_destroy(struct rocker_wait *work)
341{
342 kfree(work);
343}
344
345static bool rocker_wait_event_timeout(struct rocker_wait *wait,
346 unsigned long timeout)
347{
348 wait_event_timeout(wait->wait, wait->done, HZ / 10);
349 if (!wait->done)
350 return false;
351 return true;
352}
353
354static void rocker_wait_wake_up(struct rocker_wait *wait)
355{
356 wait->done = true;
357 wake_up(&wait->wait);
358}
359
360static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
361{
362 return rocker->msix_entries[vector].vector;
363}
364
365static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
366{
367 return rocker_msix_vector(rocker_port->rocker,
368 ROCKER_MSIX_VEC_TX(rocker_port->port_number));
369}
370
371static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
372{
373 return rocker_msix_vector(rocker_port->rocker,
374 ROCKER_MSIX_VEC_RX(rocker_port->port_number));
375}
376
377#define rocker_write32(rocker, reg, val) \
378 writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
379#define rocker_read32(rocker, reg) \
380 readl((rocker)->hw_addr + (ROCKER_ ## reg))
381#define rocker_write64(rocker, reg, val) \
382 writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
383#define rocker_read64(rocker, reg) \
384 readq((rocker)->hw_addr + (ROCKER_ ## reg))
385
386/*****************************
387 * HW basic testing functions
388 *****************************/
389
390static int rocker_reg_test(struct rocker *rocker)
391{
392 struct pci_dev *pdev = rocker->pdev;
393 u64 test_reg;
394 u64 rnd;
395
396 rnd = prandom_u32();
397 rnd >>= 1;
398 rocker_write32(rocker, TEST_REG, rnd);
399 test_reg = rocker_read32(rocker, TEST_REG);
400 if (test_reg != rnd * 2) {
401 dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
402 test_reg, rnd * 2);
403 return -EIO;
404 }
405
406 rnd = prandom_u32();
407 rnd <<= 31;
408 rnd |= prandom_u32();
409 rocker_write64(rocker, TEST_REG64, rnd);
410 test_reg = rocker_read64(rocker, TEST_REG64);
411 if (test_reg != rnd * 2) {
412 dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
413 test_reg, rnd * 2);
414 return -EIO;
415 }
416
417 return 0;
418}
419
420static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
421 u32 test_type, dma_addr_t dma_handle,
422 unsigned char *buf, unsigned char *expect,
423 size_t size)
424{
425 struct pci_dev *pdev = rocker->pdev;
426 int i;
427
428 rocker_wait_reset(wait);
429 rocker_write32(rocker, TEST_DMA_CTRL, test_type);
430
431 if (!rocker_wait_event_timeout(wait, HZ / 10)) {
432 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
433 return -EIO;
434 }
435
436 for (i = 0; i < size; i++) {
437 if (buf[i] != expect[i]) {
438 dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
439 buf[i], i, expect[i]);
440 return -EIO;
441 }
442 }
443 return 0;
444}
445
446#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
447#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
448
449static int rocker_dma_test_offset(struct rocker *rocker,
450 struct rocker_wait *wait, int offset)
451{
452 struct pci_dev *pdev = rocker->pdev;
453 unsigned char *alloc;
454 unsigned char *buf;
455 unsigned char *expect;
456 dma_addr_t dma_handle;
457 int i;
458 int err;
459
460 alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
461 GFP_KERNEL | GFP_DMA);
462 if (!alloc)
463 return -ENOMEM;
464 buf = alloc + offset;
465 expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
466
467 dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
468 PCI_DMA_BIDIRECTIONAL);
469 if (pci_dma_mapping_error(pdev, dma_handle)) {
470 err = -EIO;
471 goto free_alloc;
472 }
473
474 rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
475 rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
476
477 memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
478 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
479 dma_handle, buf, expect,
480 ROCKER_TEST_DMA_BUF_SIZE);
481 if (err)
482 goto unmap;
483
484 memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
485 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
486 dma_handle, buf, expect,
487 ROCKER_TEST_DMA_BUF_SIZE);
488 if (err)
489 goto unmap;
490
491 prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
492 for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
493 expect[i] = ~buf[i];
494 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
495 dma_handle, buf, expect,
496 ROCKER_TEST_DMA_BUF_SIZE);
497 if (err)
498 goto unmap;
499
500unmap:
501 pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
502 PCI_DMA_BIDIRECTIONAL);
503free_alloc:
504 kfree(alloc);
505
506 return err;
507}
508
509static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
510{
511 int i;
512 int err;
513
514 for (i = 0; i < 8; i++) {
515 err = rocker_dma_test_offset(rocker, wait, i);
516 if (err)
517 return err;
518 }
519 return 0;
520}
521
522static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
523{
524 struct rocker_wait *wait = dev_id;
525
526 rocker_wait_wake_up(wait);
527
528 return IRQ_HANDLED;
529}
530
531static int rocker_basic_hw_test(struct rocker *rocker)
532{
533 struct pci_dev *pdev = rocker->pdev;
534 struct rocker_wait wait;
535 int err;
536
537 err = rocker_reg_test(rocker);
538 if (err) {
539 dev_err(&pdev->dev, "reg test failed\n");
540 return err;
541 }
542
543 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
544 rocker_test_irq_handler, 0,
545 rocker_driver_name, &wait);
546 if (err) {
547 dev_err(&pdev->dev, "cannot assign test irq\n");
548 return err;
549 }
550
551 rocker_wait_init(&wait);
552 rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
553
554 if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
555 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
556 err = -EIO;
557 goto free_irq;
558 }
559
560 err = rocker_dma_test(rocker, &wait);
561 if (err)
562 dev_err(&pdev->dev, "dma test failed\n");
563
564free_irq:
565 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
566 return err;
567}
568
569/******
570 * TLV
571 ******/
572
573#define ROCKER_TLV_ALIGNTO 8U
574#define ROCKER_TLV_ALIGN(len) \
575 (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
576#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
577
578/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
579 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
580 * | Header | Pad | Payload | Pad |
581 * | (struct rocker_tlv) | ing | | ing |
582 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
583 * <--------------------------- tlv->len -------------------------->
584 */
585
586static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
587 int *remaining)
588{
589 int totlen = ROCKER_TLV_ALIGN(tlv->len);
590
591 *remaining -= totlen;
592 return (struct rocker_tlv *) ((char *) tlv + totlen);
593}
594
595static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
596{
597 return remaining >= (int) ROCKER_TLV_HDRLEN &&
598 tlv->len >= ROCKER_TLV_HDRLEN &&
599 tlv->len <= remaining;
600}
601
602#define rocker_tlv_for_each(pos, head, len, rem) \
603 for (pos = head, rem = len; \
604 rocker_tlv_ok(pos, rem); \
605 pos = rocker_tlv_next(pos, &(rem)))
606
607#define rocker_tlv_for_each_nested(pos, tlv, rem) \
608 rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \
609 rocker_tlv_len(tlv), rem)
610
611static int rocker_tlv_attr_size(int payload)
612{
613 return ROCKER_TLV_HDRLEN + payload;
614}
615
616static int rocker_tlv_total_size(int payload)
617{
618 return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
619}
620
621static int rocker_tlv_padlen(int payload)
622{
623 return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
624}
625
626static int rocker_tlv_type(const struct rocker_tlv *tlv)
627{
628 return tlv->type;
629}
630
631static void *rocker_tlv_data(const struct rocker_tlv *tlv)
632{
633 return (char *) tlv + ROCKER_TLV_HDRLEN;
634}
635
636static int rocker_tlv_len(const struct rocker_tlv *tlv)
637{
638 return tlv->len - ROCKER_TLV_HDRLEN;
639}
640
641static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
642{
643 return *(u8 *) rocker_tlv_data(tlv);
644}
645
646static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
647{
648 return *(u16 *) rocker_tlv_data(tlv);
649}
650
Jiri Pirko9b03c712014-12-03 14:14:53 +0100651static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
652{
653 return *(__be16 *) rocker_tlv_data(tlv);
654}
655
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100656static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
657{
658 return *(u32 *) rocker_tlv_data(tlv);
659}
660
661static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
662{
663 return *(u64 *) rocker_tlv_data(tlv);
664}
665
666static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
667 const char *buf, int buf_len)
668{
669 const struct rocker_tlv *tlv;
670 const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
671 int rem;
672
673 memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
674
675 rocker_tlv_for_each(tlv, head, buf_len, rem) {
676 u32 type = rocker_tlv_type(tlv);
677
678 if (type > 0 && type <= maxtype)
679 tb[type] = (struct rocker_tlv *) tlv;
680 }
681}
682
683static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
684 const struct rocker_tlv *tlv)
685{
686 rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
687 rocker_tlv_len(tlv));
688}
689
690static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
691 struct rocker_desc_info *desc_info)
692{
693 rocker_tlv_parse(tb, maxtype, desc_info->data,
694 desc_info->desc->tlv_size);
695}
696
697static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
698{
699 return (struct rocker_tlv *) ((char *) desc_info->data +
700 desc_info->tlv_size);
701}
702
703static int rocker_tlv_put(struct rocker_desc_info *desc_info,
704 int attrtype, int attrlen, const void *data)
705{
706 int tail_room = desc_info->data_size - desc_info->tlv_size;
707 int total_size = rocker_tlv_total_size(attrlen);
708 struct rocker_tlv *tlv;
709
710 if (unlikely(tail_room < total_size))
711 return -EMSGSIZE;
712
713 tlv = rocker_tlv_start(desc_info);
714 desc_info->tlv_size += total_size;
715 tlv->type = attrtype;
716 tlv->len = rocker_tlv_attr_size(attrlen);
717 memcpy(rocker_tlv_data(tlv), data, attrlen);
718 memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
719 return 0;
720}
721
722static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
723 int attrtype, u8 value)
724{
725 return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
726}
727
728static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
729 int attrtype, u16 value)
730{
731 return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
732}
733
Jiri Pirko9b03c712014-12-03 14:14:53 +0100734static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
735 int attrtype, __be16 value)
736{
737 return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
738}
739
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100740static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
741 int attrtype, u32 value)
742{
743 return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
744}
745
Jiri Pirko9b03c712014-12-03 14:14:53 +0100746static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
747 int attrtype, __be32 value)
748{
749 return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
750}
751
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100752static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
753 int attrtype, u64 value)
754{
755 return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
756}
757
758static struct rocker_tlv *
759rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
760{
761 struct rocker_tlv *start = rocker_tlv_start(desc_info);
762
763 if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
764 return NULL;
765
766 return start;
767}
768
769static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
770 struct rocker_tlv *start)
771{
772 start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
773}
774
775static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
776 struct rocker_tlv *start)
777{
778 desc_info->tlv_size = (char *) start - desc_info->data;
779}
780
781/******************************************
782 * DMA rings and descriptors manipulations
783 ******************************************/
784
785static u32 __pos_inc(u32 pos, size_t limit)
786{
787 return ++pos == limit ? 0 : pos;
788}
789
790static int rocker_desc_err(struct rocker_desc_info *desc_info)
791{
Scott Feldman7eb344f2015-02-25 20:15:36 -0800792 int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
793
794 switch (err) {
795 case ROCKER_OK:
796 return 0;
797 case -ROCKER_ENOENT:
798 return -ENOENT;
799 case -ROCKER_ENXIO:
800 return -ENXIO;
801 case -ROCKER_ENOMEM:
802 return -ENOMEM;
803 case -ROCKER_EEXIST:
804 return -EEXIST;
805 case -ROCKER_EINVAL:
806 return -EINVAL;
807 case -ROCKER_EMSGSIZE:
808 return -EMSGSIZE;
809 case -ROCKER_ENOTSUP:
810 return -EOPNOTSUPP;
811 case -ROCKER_ENOBUFS:
812 return -ENOBUFS;
813 }
814
815 return -EINVAL;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100816}
817
818static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
819{
820 desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
821}
822
823static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
824{
825 u32 comp_err = desc_info->desc->comp_err;
826
827 return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
828}
829
830static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
831{
Arnd Bergmannadedf372015-01-13 15:23:52 +0100832 return (void *)(uintptr_t)desc_info->desc->cookie;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100833}
834
835static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
836 void *ptr)
837{
Arnd Bergmannadedf372015-01-13 15:23:52 +0100838 desc_info->desc->cookie = (uintptr_t) ptr;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100839}
840
841static struct rocker_desc_info *
842rocker_desc_head_get(struct rocker_dma_ring_info *info)
843{
844 static struct rocker_desc_info *desc_info;
845 u32 head = __pos_inc(info->head, info->size);
846
847 desc_info = &info->desc_info[info->head];
848 if (head == info->tail)
849 return NULL; /* ring full */
850 desc_info->tlv_size = 0;
851 return desc_info;
852}
853
854static void rocker_desc_commit(struct rocker_desc_info *desc_info)
855{
856 desc_info->desc->buf_size = desc_info->data_size;
857 desc_info->desc->tlv_size = desc_info->tlv_size;
858}
859
860static void rocker_desc_head_set(struct rocker *rocker,
861 struct rocker_dma_ring_info *info,
862 struct rocker_desc_info *desc_info)
863{
864 u32 head = __pos_inc(info->head, info->size);
865
866 BUG_ON(head == info->tail);
867 rocker_desc_commit(desc_info);
868 info->head = head;
869 rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
870}
871
872static struct rocker_desc_info *
873rocker_desc_tail_get(struct rocker_dma_ring_info *info)
874{
875 static struct rocker_desc_info *desc_info;
876
877 if (info->tail == info->head)
878 return NULL; /* nothing to be done between head and tail */
879 desc_info = &info->desc_info[info->tail];
880 if (!rocker_desc_gen(desc_info))
881 return NULL; /* gen bit not set, desc is not ready yet */
882 info->tail = __pos_inc(info->tail, info->size);
883 desc_info->tlv_size = desc_info->desc->tlv_size;
884 return desc_info;
885}
886
887static void rocker_dma_ring_credits_set(struct rocker *rocker,
888 struct rocker_dma_ring_info *info,
889 u32 credits)
890{
891 if (credits)
892 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
893}
894
895static unsigned long rocker_dma_ring_size_fix(size_t size)
896{
897 return max(ROCKER_DMA_SIZE_MIN,
898 min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
899}
900
901static int rocker_dma_ring_create(struct rocker *rocker,
902 unsigned int type,
903 size_t size,
904 struct rocker_dma_ring_info *info)
905{
906 int i;
907
908 BUG_ON(size != rocker_dma_ring_size_fix(size));
909 info->size = size;
910 info->type = type;
911 info->head = 0;
912 info->tail = 0;
913 info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
914 GFP_KERNEL);
915 if (!info->desc_info)
916 return -ENOMEM;
917
918 info->desc = pci_alloc_consistent(rocker->pdev,
919 info->size * sizeof(*info->desc),
920 &info->mapaddr);
921 if (!info->desc) {
922 kfree(info->desc_info);
923 return -ENOMEM;
924 }
925
926 for (i = 0; i < info->size; i++)
927 info->desc_info[i].desc = &info->desc[i];
928
929 rocker_write32(rocker, DMA_DESC_CTRL(info->type),
930 ROCKER_DMA_DESC_CTRL_RESET);
931 rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
932 rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
933
934 return 0;
935}
936
937static void rocker_dma_ring_destroy(struct rocker *rocker,
938 struct rocker_dma_ring_info *info)
939{
940 rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
941
942 pci_free_consistent(rocker->pdev,
943 info->size * sizeof(struct rocker_desc),
944 info->desc, info->mapaddr);
945 kfree(info->desc_info);
946}
947
948static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
949 struct rocker_dma_ring_info *info)
950{
951 int i;
952
953 BUG_ON(info->head || info->tail);
954
955 /* When ring is consumer, we need to advance head for each desc.
956 * That tells hw that the desc is ready to be used by it.
957 */
958 for (i = 0; i < info->size - 1; i++)
959 rocker_desc_head_set(rocker, info, &info->desc_info[i]);
960 rocker_desc_commit(&info->desc_info[i]);
961}
962
963static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
964 struct rocker_dma_ring_info *info,
965 int direction, size_t buf_size)
966{
967 struct pci_dev *pdev = rocker->pdev;
968 int i;
969 int err;
970
971 for (i = 0; i < info->size; i++) {
972 struct rocker_desc_info *desc_info = &info->desc_info[i];
973 struct rocker_desc *desc = &info->desc[i];
974 dma_addr_t dma_handle;
975 char *buf;
976
977 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
978 if (!buf) {
979 err = -ENOMEM;
980 goto rollback;
981 }
982
983 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
984 if (pci_dma_mapping_error(pdev, dma_handle)) {
985 kfree(buf);
986 err = -EIO;
987 goto rollback;
988 }
989
990 desc_info->data = buf;
991 desc_info->data_size = buf_size;
992 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
993
994 desc->buf_addr = dma_handle;
995 desc->buf_size = buf_size;
996 }
997 return 0;
998
999rollback:
1000 for (i--; i >= 0; i--) {
1001 struct rocker_desc_info *desc_info = &info->desc_info[i];
1002
1003 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1004 desc_info->data_size, direction);
1005 kfree(desc_info->data);
1006 }
1007 return err;
1008}
1009
1010static void rocker_dma_ring_bufs_free(struct rocker *rocker,
1011 struct rocker_dma_ring_info *info,
1012 int direction)
1013{
1014 struct pci_dev *pdev = rocker->pdev;
1015 int i;
1016
1017 for (i = 0; i < info->size; i++) {
1018 struct rocker_desc_info *desc_info = &info->desc_info[i];
1019 struct rocker_desc *desc = &info->desc[i];
1020
1021 desc->buf_addr = 0;
1022 desc->buf_size = 0;
1023 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1024 desc_info->data_size, direction);
1025 kfree(desc_info->data);
1026 }
1027}
1028
1029static int rocker_dma_rings_init(struct rocker *rocker)
1030{
1031 struct pci_dev *pdev = rocker->pdev;
1032 int err;
1033
1034 err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
1035 ROCKER_DMA_CMD_DEFAULT_SIZE,
1036 &rocker->cmd_ring);
1037 if (err) {
1038 dev_err(&pdev->dev, "failed to create command dma ring\n");
1039 return err;
1040 }
1041
1042 spin_lock_init(&rocker->cmd_ring_lock);
1043
1044 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
1045 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
1046 if (err) {
1047 dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
1048 goto err_dma_cmd_ring_bufs_alloc;
1049 }
1050
1051 err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
1052 ROCKER_DMA_EVENT_DEFAULT_SIZE,
1053 &rocker->event_ring);
1054 if (err) {
1055 dev_err(&pdev->dev, "failed to create event dma ring\n");
1056 goto err_dma_event_ring_create;
1057 }
1058
1059 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
1060 PCI_DMA_FROMDEVICE, PAGE_SIZE);
1061 if (err) {
1062 dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
1063 goto err_dma_event_ring_bufs_alloc;
1064 }
1065 rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
1066 return 0;
1067
1068err_dma_event_ring_bufs_alloc:
1069 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1070err_dma_event_ring_create:
1071 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1072 PCI_DMA_BIDIRECTIONAL);
1073err_dma_cmd_ring_bufs_alloc:
1074 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1075 return err;
1076}
1077
1078static void rocker_dma_rings_fini(struct rocker *rocker)
1079{
1080 rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
1081 PCI_DMA_BIDIRECTIONAL);
1082 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1083 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1084 PCI_DMA_BIDIRECTIONAL);
1085 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1086}
1087
1088static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
1089 struct rocker_port *rocker_port,
1090 struct rocker_desc_info *desc_info,
1091 struct sk_buff *skb, size_t buf_len)
1092{
1093 struct pci_dev *pdev = rocker->pdev;
1094 dma_addr_t dma_handle;
1095
1096 dma_handle = pci_map_single(pdev, skb->data, buf_len,
1097 PCI_DMA_FROMDEVICE);
1098 if (pci_dma_mapping_error(pdev, dma_handle))
1099 return -EIO;
1100 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
1101 goto tlv_put_failure;
1102 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
1103 goto tlv_put_failure;
1104 return 0;
1105
1106tlv_put_failure:
1107 pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
1108 desc_info->tlv_size = 0;
1109 return -EMSGSIZE;
1110}
1111
1112static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
1113{
1114 return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1115}
1116
1117static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
1118 struct rocker_port *rocker_port,
1119 struct rocker_desc_info *desc_info)
1120{
1121 struct net_device *dev = rocker_port->dev;
1122 struct sk_buff *skb;
1123 size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1124 int err;
1125
1126 /* Ensure that hw will see tlv_size zero in case of an error.
1127 * That tells hw to use another descriptor.
1128 */
1129 rocker_desc_cookie_ptr_set(desc_info, NULL);
1130 desc_info->tlv_size = 0;
1131
1132 skb = netdev_alloc_skb_ip_align(dev, buf_len);
1133 if (!skb)
1134 return -ENOMEM;
1135 err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
1136 skb, buf_len);
1137 if (err) {
1138 dev_kfree_skb_any(skb);
1139 return err;
1140 }
1141 rocker_desc_cookie_ptr_set(desc_info, skb);
1142 return 0;
1143}
1144
1145static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
1146 struct rocker_tlv **attrs)
1147{
1148 struct pci_dev *pdev = rocker->pdev;
1149 dma_addr_t dma_handle;
1150 size_t len;
1151
1152 if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1153 !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1154 return;
1155 dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1156 len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1157 pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1158}
1159
1160static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
1161 struct rocker_desc_info *desc_info)
1162{
1163 struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
1164 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1165
1166 if (!skb)
1167 return;
1168 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1169 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1170 dev_kfree_skb_any(skb);
1171}
1172
1173static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
1174 struct rocker_port *rocker_port)
1175{
1176 struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1177 int i;
1178 int err;
1179
1180 for (i = 0; i < rx_ring->size; i++) {
1181 err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
1182 &rx_ring->desc_info[i]);
1183 if (err)
1184 goto rollback;
1185 }
1186 return 0;
1187
1188rollback:
1189 for (i--; i >= 0; i--)
1190 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1191 return err;
1192}
1193
1194static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
1195 struct rocker_port *rocker_port)
1196{
1197 struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1198 int i;
1199
1200 for (i = 0; i < rx_ring->size; i++)
1201 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1202}
1203
1204static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1205{
1206 struct rocker *rocker = rocker_port->rocker;
1207 int err;
1208
1209 err = rocker_dma_ring_create(rocker,
1210 ROCKER_DMA_TX(rocker_port->port_number),
1211 ROCKER_DMA_TX_DEFAULT_SIZE,
1212 &rocker_port->tx_ring);
1213 if (err) {
1214 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1215 return err;
1216 }
1217
1218 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1219 PCI_DMA_TODEVICE,
1220 ROCKER_DMA_TX_DESC_SIZE);
1221 if (err) {
1222 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1223 goto err_dma_tx_ring_bufs_alloc;
1224 }
1225
1226 err = rocker_dma_ring_create(rocker,
1227 ROCKER_DMA_RX(rocker_port->port_number),
1228 ROCKER_DMA_RX_DEFAULT_SIZE,
1229 &rocker_port->rx_ring);
1230 if (err) {
1231 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1232 goto err_dma_rx_ring_create;
1233 }
1234
1235 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1236 PCI_DMA_BIDIRECTIONAL,
1237 ROCKER_DMA_RX_DESC_SIZE);
1238 if (err) {
1239 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1240 goto err_dma_rx_ring_bufs_alloc;
1241 }
1242
1243 err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
1244 if (err) {
1245 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1246 goto err_dma_rx_ring_skbs_alloc;
1247 }
1248 rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1249
1250 return 0;
1251
1252err_dma_rx_ring_skbs_alloc:
1253 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1254 PCI_DMA_BIDIRECTIONAL);
1255err_dma_rx_ring_bufs_alloc:
1256 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1257err_dma_rx_ring_create:
1258 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1259 PCI_DMA_TODEVICE);
1260err_dma_tx_ring_bufs_alloc:
1261 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1262 return err;
1263}
1264
1265static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1266{
1267 struct rocker *rocker = rocker_port->rocker;
1268
1269 rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
1270 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1271 PCI_DMA_BIDIRECTIONAL);
1272 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1273 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1274 PCI_DMA_TODEVICE);
1275 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1276}
1277
1278static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
1279{
1280 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1281
1282 if (enable)
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001283 val |= 1 << rocker_port->pport;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001284 else
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001285 val &= ~(1 << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001286 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1287}
1288
1289/********************************
1290 * Interrupt handler and helpers
1291 ********************************/
1292
1293static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1294{
1295 struct rocker *rocker = dev_id;
1296 struct rocker_desc_info *desc_info;
1297 struct rocker_wait *wait;
1298 u32 credits = 0;
1299
1300 spin_lock(&rocker->cmd_ring_lock);
1301 while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1302 wait = rocker_desc_cookie_ptr_get(desc_info);
1303 if (wait->nowait) {
1304 rocker_desc_gen_clear(desc_info);
1305 rocker_wait_destroy(wait);
1306 } else {
1307 rocker_wait_wake_up(wait);
1308 }
1309 credits++;
1310 }
1311 spin_unlock(&rocker->cmd_ring_lock);
1312 rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1313
1314 return IRQ_HANDLED;
1315}
1316
1317static void rocker_port_link_up(struct rocker_port *rocker_port)
1318{
1319 netif_carrier_on(rocker_port->dev);
1320 netdev_info(rocker_port->dev, "Link is up\n");
1321}
1322
1323static void rocker_port_link_down(struct rocker_port *rocker_port)
1324{
1325 netif_carrier_off(rocker_port->dev);
1326 netdev_info(rocker_port->dev, "Link is down\n");
1327}
1328
1329static int rocker_event_link_change(struct rocker *rocker,
1330 const struct rocker_tlv *info)
1331{
1332 struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
1333 unsigned int port_number;
1334 bool link_up;
1335 struct rocker_port *rocker_port;
1336
1337 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001338 if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001339 !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1340 return -EIO;
1341 port_number =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001342 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001343 link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1344
1345 if (port_number >= rocker->port_count)
1346 return -EINVAL;
1347
1348 rocker_port = rocker->ports[port_number];
1349 if (netif_carrier_ok(rocker_port->dev) != link_up) {
1350 if (link_up)
1351 rocker_port_link_up(rocker_port);
1352 else
1353 rocker_port_link_down(rocker_port);
1354 }
1355
1356 return 0;
1357}
1358
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001359#define ROCKER_OP_FLAG_REMOVE BIT(0)
1360#define ROCKER_OP_FLAG_NOWAIT BIT(1)
1361#define ROCKER_OP_FLAG_LEARNED BIT(2)
Scott Feldman6c707942014-11-28 14:34:28 +01001362#define ROCKER_OP_FLAG_REFRESH BIT(3)
1363
1364static int rocker_port_fdb(struct rocker_port *rocker_port,
1365 const unsigned char *addr,
1366 __be16 vlan_id, int flags);
1367
1368static int rocker_event_mac_vlan_seen(struct rocker *rocker,
1369 const struct rocker_tlv *info)
1370{
1371 struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
1372 unsigned int port_number;
1373 struct rocker_port *rocker_port;
1374 unsigned char *addr;
1375 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
1376 __be16 vlan_id;
1377
1378 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001379 if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
Scott Feldman6c707942014-11-28 14:34:28 +01001380 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1381 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1382 return -EIO;
1383 port_number =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001384 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
Scott Feldman6c707942014-11-28 14:34:28 +01001385 addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
Jiri Pirko9b03c712014-12-03 14:14:53 +01001386 vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
Scott Feldman6c707942014-11-28 14:34:28 +01001387
1388 if (port_number >= rocker->port_count)
1389 return -EINVAL;
1390
1391 rocker_port = rocker->ports[port_number];
1392
1393 if (rocker_port->stp_state != BR_STATE_LEARNING &&
1394 rocker_port->stp_state != BR_STATE_FORWARDING)
1395 return 0;
1396
1397 return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
1398}
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001399
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001400static int rocker_event_process(struct rocker *rocker,
1401 struct rocker_desc_info *desc_info)
1402{
1403 struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1404 struct rocker_tlv *info;
1405 u16 type;
1406
1407 rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1408 if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1409 !attrs[ROCKER_TLV_EVENT_INFO])
1410 return -EIO;
1411
1412 type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1413 info = attrs[ROCKER_TLV_EVENT_INFO];
1414
1415 switch (type) {
1416 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1417 return rocker_event_link_change(rocker, info);
Scott Feldman6c707942014-11-28 14:34:28 +01001418 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1419 return rocker_event_mac_vlan_seen(rocker, info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001420 }
1421
1422 return -EOPNOTSUPP;
1423}
1424
1425static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1426{
1427 struct rocker *rocker = dev_id;
1428 struct pci_dev *pdev = rocker->pdev;
1429 struct rocker_desc_info *desc_info;
1430 u32 credits = 0;
1431 int err;
1432
1433 while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1434 err = rocker_desc_err(desc_info);
1435 if (err) {
1436 dev_err(&pdev->dev, "event desc received with err %d\n",
1437 err);
1438 } else {
1439 err = rocker_event_process(rocker, desc_info);
1440 if (err)
1441 dev_err(&pdev->dev, "event processing failed with err %d\n",
1442 err);
1443 }
1444 rocker_desc_gen_clear(desc_info);
1445 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1446 credits++;
1447 }
1448 rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1449
1450 return IRQ_HANDLED;
1451}
1452
1453static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1454{
1455 struct rocker_port *rocker_port = dev_id;
1456
1457 napi_schedule(&rocker_port->napi_tx);
1458 return IRQ_HANDLED;
1459}
1460
1461static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1462{
1463 struct rocker_port *rocker_port = dev_id;
1464
1465 napi_schedule(&rocker_port->napi_rx);
1466 return IRQ_HANDLED;
1467}
1468
1469/********************
1470 * Command interface
1471 ********************/
1472
1473typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
1474 struct rocker_port *rocker_port,
1475 struct rocker_desc_info *desc_info,
1476 void *priv);
1477
1478static int rocker_cmd_exec(struct rocker *rocker,
1479 struct rocker_port *rocker_port,
1480 rocker_cmd_cb_t prepare, void *prepare_priv,
1481 rocker_cmd_cb_t process, void *process_priv,
1482 bool nowait)
1483{
1484 struct rocker_desc_info *desc_info;
1485 struct rocker_wait *wait;
1486 unsigned long flags;
1487 int err;
1488
1489 wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
1490 if (!wait)
1491 return -ENOMEM;
1492 wait->nowait = nowait;
1493
1494 spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
1495 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1496 if (!desc_info) {
1497 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1498 err = -EAGAIN;
1499 goto out;
1500 }
1501 err = prepare(rocker, rocker_port, desc_info, prepare_priv);
1502 if (err) {
1503 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1504 goto out;
1505 }
1506 rocker_desc_cookie_ptr_set(desc_info, wait);
1507 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1508 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1509
1510 if (nowait)
1511 return 0;
1512
1513 if (!rocker_wait_event_timeout(wait, HZ / 10))
1514 return -EIO;
1515
1516 err = rocker_desc_err(desc_info);
1517 if (err)
1518 return err;
1519
1520 if (process)
1521 err = process(rocker, rocker_port, desc_info, process_priv);
1522
1523 rocker_desc_gen_clear(desc_info);
1524out:
1525 rocker_wait_destroy(wait);
1526 return err;
1527}
1528
1529static int
1530rocker_cmd_get_port_settings_prep(struct rocker *rocker,
1531 struct rocker_port *rocker_port,
1532 struct rocker_desc_info *desc_info,
1533 void *priv)
1534{
1535 struct rocker_tlv *cmd_info;
1536
1537 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1538 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1539 return -EMSGSIZE;
1540 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1541 if (!cmd_info)
1542 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001543 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1544 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001545 return -EMSGSIZE;
1546 rocker_tlv_nest_end(desc_info, cmd_info);
1547 return 0;
1548}
1549
1550static int
1551rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
1552 struct rocker_port *rocker_port,
1553 struct rocker_desc_info *desc_info,
1554 void *priv)
1555{
1556 struct ethtool_cmd *ecmd = priv;
1557 struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1558 struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1559 u32 speed;
1560 u8 duplex;
1561 u8 autoneg;
1562
1563 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1564 if (!attrs[ROCKER_TLV_CMD_INFO])
1565 return -EIO;
1566
1567 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1568 attrs[ROCKER_TLV_CMD_INFO]);
1569 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1570 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1571 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1572 return -EIO;
1573
1574 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1575 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1576 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1577
1578 ecmd->transceiver = XCVR_INTERNAL;
1579 ecmd->supported = SUPPORTED_TP;
1580 ecmd->phy_address = 0xff;
1581 ecmd->port = PORT_TP;
1582 ethtool_cmd_speed_set(ecmd, speed);
1583 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1584 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1585
1586 return 0;
1587}
1588
1589static int
1590rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
1591 struct rocker_port *rocker_port,
1592 struct rocker_desc_info *desc_info,
1593 void *priv)
1594{
1595 unsigned char *macaddr = priv;
1596 struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1597 struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1598 struct rocker_tlv *attr;
1599
1600 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1601 if (!attrs[ROCKER_TLV_CMD_INFO])
1602 return -EIO;
1603
1604 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1605 attrs[ROCKER_TLV_CMD_INFO]);
1606 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1607 if (!attr)
1608 return -EIO;
1609
1610 if (rocker_tlv_len(attr) != ETH_ALEN)
1611 return -EINVAL;
1612
1613 ether_addr_copy(macaddr, rocker_tlv_data(attr));
1614 return 0;
1615}
1616
1617static int
1618rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
1619 struct rocker_port *rocker_port,
1620 struct rocker_desc_info *desc_info,
1621 void *priv)
1622{
1623 struct ethtool_cmd *ecmd = priv;
1624 struct rocker_tlv *cmd_info;
1625
1626 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1627 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1628 return -EMSGSIZE;
1629 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1630 if (!cmd_info)
1631 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001632 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1633 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001634 return -EMSGSIZE;
1635 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1636 ethtool_cmd_speed(ecmd)))
1637 return -EMSGSIZE;
1638 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1639 ecmd->duplex))
1640 return -EMSGSIZE;
1641 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1642 ecmd->autoneg))
1643 return -EMSGSIZE;
1644 rocker_tlv_nest_end(desc_info, cmd_info);
1645 return 0;
1646}
1647
1648static int
1649rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
1650 struct rocker_port *rocker_port,
1651 struct rocker_desc_info *desc_info,
1652 void *priv)
1653{
1654 unsigned char *macaddr = priv;
1655 struct rocker_tlv *cmd_info;
1656
1657 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1658 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1659 return -EMSGSIZE;
1660 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1661 if (!cmd_info)
1662 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001663 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1664 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001665 return -EMSGSIZE;
1666 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1667 ETH_ALEN, macaddr))
1668 return -EMSGSIZE;
1669 rocker_tlv_nest_end(desc_info, cmd_info);
1670 return 0;
1671}
1672
Scott Feldman5111f802014-11-28 14:34:30 +01001673static int
1674rocker_cmd_set_port_learning_prep(struct rocker *rocker,
1675 struct rocker_port *rocker_port,
1676 struct rocker_desc_info *desc_info,
1677 void *priv)
1678{
1679 struct rocker_tlv *cmd_info;
1680
1681 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1682 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1683 return -EMSGSIZE;
1684 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1685 if (!cmd_info)
1686 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001687 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1688 rocker_port->pport))
Scott Feldman5111f802014-11-28 14:34:30 +01001689 return -EMSGSIZE;
1690 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1691 !!(rocker_port->brport_flags & BR_LEARNING)))
1692 return -EMSGSIZE;
1693 rocker_tlv_nest_end(desc_info, cmd_info);
1694 return 0;
1695}
1696
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001697static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1698 struct ethtool_cmd *ecmd)
1699{
1700 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1701 rocker_cmd_get_port_settings_prep, NULL,
1702 rocker_cmd_get_port_settings_ethtool_proc,
1703 ecmd, false);
1704}
1705
1706static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1707 unsigned char *macaddr)
1708{
1709 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1710 rocker_cmd_get_port_settings_prep, NULL,
1711 rocker_cmd_get_port_settings_macaddr_proc,
1712 macaddr, false);
1713}
1714
1715static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1716 struct ethtool_cmd *ecmd)
1717{
1718 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1719 rocker_cmd_set_port_settings_ethtool_prep,
1720 ecmd, NULL, NULL, false);
1721}
1722
1723static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1724 unsigned char *macaddr)
1725{
1726 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1727 rocker_cmd_set_port_settings_macaddr_prep,
1728 macaddr, NULL, NULL, false);
1729}
1730
Scott Feldman5111f802014-11-28 14:34:30 +01001731static int rocker_port_set_learning(struct rocker_port *rocker_port)
1732{
1733 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1734 rocker_cmd_set_port_learning_prep,
1735 NULL, NULL, NULL, false);
1736}
1737
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001738static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1739 struct rocker_flow_tbl_entry *entry)
1740{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001741 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1742 entry->key.ig_port.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001743 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001744 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1745 entry->key.ig_port.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001746 return -EMSGSIZE;
1747 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1748 entry->key.ig_port.goto_tbl))
1749 return -EMSGSIZE;
1750
1751 return 0;
1752}
1753
1754static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1755 struct rocker_flow_tbl_entry *entry)
1756{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001757 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1758 entry->key.vlan.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001759 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001760 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1761 entry->key.vlan.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001762 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001763 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1764 entry->key.vlan.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001765 return -EMSGSIZE;
1766 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1767 entry->key.vlan.goto_tbl))
1768 return -EMSGSIZE;
1769 if (entry->key.vlan.untagged &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01001770 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1771 entry->key.vlan.new_vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001772 return -EMSGSIZE;
1773
1774 return 0;
1775}
1776
1777static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1778 struct rocker_flow_tbl_entry *entry)
1779{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001780 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1781 entry->key.term_mac.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001782 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001783 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1784 entry->key.term_mac.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001785 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001786 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1787 entry->key.term_mac.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001788 return -EMSGSIZE;
1789 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1790 ETH_ALEN, entry->key.term_mac.eth_dst))
1791 return -EMSGSIZE;
1792 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1793 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1794 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001795 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1796 entry->key.term_mac.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001797 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001798 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1799 entry->key.term_mac.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001800 return -EMSGSIZE;
1801 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1802 entry->key.term_mac.goto_tbl))
1803 return -EMSGSIZE;
1804 if (entry->key.term_mac.copy_to_cpu &&
1805 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1806 entry->key.term_mac.copy_to_cpu))
1807 return -EMSGSIZE;
1808
1809 return 0;
1810}
1811
1812static int
1813rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
1814 struct rocker_flow_tbl_entry *entry)
1815{
Jiri Pirko9b03c712014-12-03 14:14:53 +01001816 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1817 entry->key.ucast_routing.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001818 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001819 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
1820 entry->key.ucast_routing.dst4))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001821 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001822 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
1823 entry->key.ucast_routing.dst4_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001824 return -EMSGSIZE;
1825 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1826 entry->key.ucast_routing.goto_tbl))
1827 return -EMSGSIZE;
1828 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1829 entry->key.ucast_routing.group_id))
1830 return -EMSGSIZE;
1831
1832 return 0;
1833}
1834
1835static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
1836 struct rocker_flow_tbl_entry *entry)
1837{
1838 if (entry->key.bridge.has_eth_dst &&
1839 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1840 ETH_ALEN, entry->key.bridge.eth_dst))
1841 return -EMSGSIZE;
1842 if (entry->key.bridge.has_eth_dst_mask &&
1843 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1844 ETH_ALEN, entry->key.bridge.eth_dst_mask))
1845 return -EMSGSIZE;
1846 if (entry->key.bridge.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01001847 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1848 entry->key.bridge.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001849 return -EMSGSIZE;
1850 if (entry->key.bridge.tunnel_id &&
1851 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
1852 entry->key.bridge.tunnel_id))
1853 return -EMSGSIZE;
1854 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1855 entry->key.bridge.goto_tbl))
1856 return -EMSGSIZE;
1857 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1858 entry->key.bridge.group_id))
1859 return -EMSGSIZE;
1860 if (entry->key.bridge.copy_to_cpu &&
1861 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1862 entry->key.bridge.copy_to_cpu))
1863 return -EMSGSIZE;
1864
1865 return 0;
1866}
1867
1868static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
1869 struct rocker_flow_tbl_entry *entry)
1870{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001871 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1872 entry->key.acl.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001873 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001874 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1875 entry->key.acl.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001876 return -EMSGSIZE;
1877 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
1878 ETH_ALEN, entry->key.acl.eth_src))
1879 return -EMSGSIZE;
1880 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
1881 ETH_ALEN, entry->key.acl.eth_src_mask))
1882 return -EMSGSIZE;
1883 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1884 ETH_ALEN, entry->key.acl.eth_dst))
1885 return -EMSGSIZE;
1886 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1887 ETH_ALEN, entry->key.acl.eth_dst_mask))
1888 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001889 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1890 entry->key.acl.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001891 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001892 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1893 entry->key.acl.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001894 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001895 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1896 entry->key.acl.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001897 return -EMSGSIZE;
1898
1899 switch (ntohs(entry->key.acl.eth_type)) {
1900 case ETH_P_IP:
1901 case ETH_P_IPV6:
1902 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
1903 entry->key.acl.ip_proto))
1904 return -EMSGSIZE;
1905 if (rocker_tlv_put_u8(desc_info,
1906 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
1907 entry->key.acl.ip_proto_mask))
1908 return -EMSGSIZE;
1909 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
1910 entry->key.acl.ip_tos & 0x3f))
1911 return -EMSGSIZE;
1912 if (rocker_tlv_put_u8(desc_info,
1913 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
1914 entry->key.acl.ip_tos_mask & 0x3f))
1915 return -EMSGSIZE;
1916 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
1917 (entry->key.acl.ip_tos & 0xc0) >> 6))
1918 return -EMSGSIZE;
1919 if (rocker_tlv_put_u8(desc_info,
1920 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
1921 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
1922 return -EMSGSIZE;
1923 break;
1924 }
1925
1926 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
1927 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1928 entry->key.acl.group_id))
1929 return -EMSGSIZE;
1930
1931 return 0;
1932}
1933
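/* Build an OF-DPA flow-add command for @entry: emit the TLVs common to
 * every flow table (table id, priority, hardtime, cookie) and then the
 * key/action TLVs specific to the table selected by entry->key.tbl_id.
 */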
1934static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
1935 struct rocker_port *rocker_port,
1936 struct rocker_desc_info *desc_info,
1937 void *priv)
1938{
1939 struct rocker_flow_tbl_entry *entry = priv;
1940 struct rocker_tlv *cmd_info;
1941 int err = 0;
1942
1943 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1944 ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD))
1945 return -EMSGSIZE;
1946 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1947 if (!cmd_info)
1948 return -EMSGSIZE;
1949 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
1950 entry->key.tbl_id))
1951 return -EMSGSIZE;
1952 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
1953 entry->key.priority))
1954 return -EMSGSIZE;
1955 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
1956 return -EMSGSIZE;
1957 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
1958 entry->cookie))
1959 return -EMSGSIZE;
1960
1961 switch (entry->key.tbl_id) {
1962 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1963 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
1964 break;
1965 case ROCKER_OF_DPA_TABLE_ID_VLAN:
1966 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
1967 break;
1968 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1969 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
1970 break;
1971 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1972 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
1973 break;
1974 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1975 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
1976 break;
1977 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1978 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
1979 break;
1980 default:
1981 err = -ENOTSUPP;
1982 break;
1983 }
1984
1985 if (err)
1986 return err;
1987
1988 rocker_tlv_nest_end(desc_info, cmd_info);
1989
1990 return 0;
1991}
1992
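/* A flow delete is keyed solely by the cookie that was assigned when
 * the entry was added.
 */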
1993static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
1994 struct rocker_port *rocker_port,
1995 struct rocker_desc_info *desc_info,
1996 void *priv)
1997{
1998 const struct rocker_flow_tbl_entry *entry = priv;
1999 struct rocker_tlv *cmd_info;
2000
2001 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
2002 ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
2003 return -EMSGSIZE;
2004 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2005 if (!cmd_info)
2006 return -EMSGSIZE;
2007 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2008 entry->cookie))
2009 return -EMSGSIZE;
2010 rocker_tlv_nest_end(desc_info, cmd_info);
2011
2012 return 0;
2013}
2014
2015static int
2016rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2017 struct rocker_group_tbl_entry *entry)
2018{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002019 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002020 ROCKER_GROUP_PORT_GET(entry->group_id)))
2021 return -EMSGSIZE;
2022 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2023 entry->l2_interface.pop_vlan))
2024 return -EMSGSIZE;
2025
2026 return 0;
2027}
2028
2029static int
2030rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2031 struct rocker_group_tbl_entry *entry)
2032{
2033 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2034 entry->l2_rewrite.group_id))
2035 return -EMSGSIZE;
2036 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2037 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2038 ETH_ALEN, entry->l2_rewrite.eth_src))
2039 return -EMSGSIZE;
2040 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2041 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2042 ETH_ALEN, entry->l2_rewrite.eth_dst))
2043 return -EMSGSIZE;
2044 if (entry->l2_rewrite.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002045 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2046 entry->l2_rewrite.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002047 return -EMSGSIZE;
2048
2049 return 0;
2050}
2051
2052static int
2053rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2054 struct rocker_group_tbl_entry *entry)
2055{
2056 int i;
2057 struct rocker_tlv *group_ids;
2058
2059 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2060 entry->group_count))
2061 return -EMSGSIZE;
2062
2063 group_ids = rocker_tlv_nest_start(desc_info,
2064 ROCKER_TLV_OF_DPA_GROUP_IDS);
2065 if (!group_ids)
2066 return -EMSGSIZE;
2067
2068 for (i = 0; i < entry->group_count; i++)
2069 /* Note TLV array is 1-based */
2070 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2071 return -EMSGSIZE;
2072
2073 rocker_tlv_nest_end(desc_info, group_ids);
2074
2075 return 0;
2076}
2077
2078static int
2079rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2080 struct rocker_group_tbl_entry *entry)
2081{
2082 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2083 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2084 ETH_ALEN, entry->l3_unicast.eth_src))
2085 return -EMSGSIZE;
2086 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2087 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2088 ETH_ALEN, entry->l3_unicast.eth_dst))
2089 return -EMSGSIZE;
2090 if (entry->l3_unicast.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002091 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2092 entry->l3_unicast.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002093 return -EMSGSIZE;
2094 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2095 entry->l3_unicast.ttl_check))
2096 return -EMSGSIZE;
2097 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2098 entry->l3_unicast.group_id))
2099 return -EMSGSIZE;
2100
2101 return 0;
2102}
2103
2104static int rocker_cmd_group_tbl_add(struct rocker *rocker,
2105 struct rocker_port *rocker_port,
2106 struct rocker_desc_info *desc_info,
2107 void *priv)
2108{
2109 struct rocker_group_tbl_entry *entry = priv;
2110 struct rocker_tlv *cmd_info;
2111 int err = 0;
2112
2113 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2114 return -EMSGSIZE;
2115 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2116 if (!cmd_info)
2117 return -EMSGSIZE;
2118
2119 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2120 entry->group_id))
2121 return -EMSGSIZE;
2122
2123 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2124 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2125 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2126 break;
2127 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2128 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2129 break;
2130 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2131 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2132 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2133 break;
2134 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2135 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2136 break;
2137 default:
2138 err = -ENOTSUPP;
2139 break;
2140 }
2141
2142 if (err)
2143 return err;
2144
2145 rocker_tlv_nest_end(desc_info, cmd_info);
2146
2147 return 0;
2148}
2149
2150static int rocker_cmd_group_tbl_del(struct rocker *rocker,
2151 struct rocker_port *rocker_port,
2152 struct rocker_desc_info *desc_info,
2153 void *priv)
2154{
2155 const struct rocker_group_tbl_entry *entry = priv;
2156 struct rocker_tlv *cmd_info;
2157
2158 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2159 return -EMSGSIZE;
2160 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2161 if (!cmd_info)
2162 return -EMSGSIZE;
2163 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2164 entry->group_id))
2165 return -EMSGSIZE;
2166 rocker_tlv_nest_end(desc_info, cmd_info);
2167
2168 return 0;
2169}
2170
2171/*****************************************
2172 * Flow, group, FDB, internal VLAN tables
2173 *****************************************/
2174
2175static int rocker_init_tbls(struct rocker *rocker)
2176{
2177 hash_init(rocker->flow_tbl);
2178 spin_lock_init(&rocker->flow_tbl_lock);
2179
2180 hash_init(rocker->group_tbl);
2181 spin_lock_init(&rocker->group_tbl_lock);
2182
2183 hash_init(rocker->fdb_tbl);
2184 spin_lock_init(&rocker->fdb_tbl_lock);
2185
2186 hash_init(rocker->internal_vlan_tbl);
2187 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2188
2189 return 0;
2190}
2191
2192static void rocker_free_tbls(struct rocker *rocker)
2193{
2194 unsigned long flags;
2195 struct rocker_flow_tbl_entry *flow_entry;
2196 struct rocker_group_tbl_entry *group_entry;
2197 struct rocker_fdb_tbl_entry *fdb_entry;
2198 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2199 struct hlist_node *tmp;
2200 int bkt;
2201
2202 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2203 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2204 hash_del(&flow_entry->entry);
2205 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2206
2207 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2208 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2209 hash_del(&group_entry->entry);
2210 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2211
2212 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2213 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2214 hash_del(&fdb_entry->entry);
2215 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2216
2217 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2218 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2219 tmp, internal_vlan_entry, entry)
2220 hash_del(&internal_vlan_entry->entry);
2221 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2222}
2223
2224static struct rocker_flow_tbl_entry *
2225rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
2226{
2227 struct rocker_flow_tbl_entry *found;
2228
2229 hash_for_each_possible(rocker->flow_tbl, found,
2230 entry, match->key_crc32) {
2231 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
2232 return found;
2233 }
2234
2235 return NULL;
2236}
2237
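/* Add a flow entry to the software table and, if the key is new, push
 * it to the device.  Entries with identical keys (hashed by CRC32 over
 * the key) are shared and reference counted; if the hardware add fails,
 * the freshly inserted entry is unhashed and freed again.
 */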
2238static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2239 struct rocker_flow_tbl_entry *match,
2240 bool nowait)
2241{
2242 struct rocker *rocker = rocker_port->rocker;
2243 struct rocker_flow_tbl_entry *found;
2244 unsigned long flags;
2245 bool add_to_hw = false;
2246 int err = 0;
2247
2248 match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
2249
2250 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2251
2252 found = rocker_flow_tbl_find(rocker, match);
2253
2254 if (found) {
2255 kfree(match);
2256 } else {
2257 found = match;
2258 found->cookie = rocker->flow_tbl_next_cookie++;
2259 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2260 add_to_hw = true;
2261 }
2262
2263 found->ref_count++;
2264
2265 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2266
2267 if (add_to_hw) {
2268 err = rocker_cmd_exec(rocker, rocker_port,
2269 rocker_cmd_flow_tbl_add,
2270 found, NULL, NULL, nowait);
2271 if (err) {
2272 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2273 hash_del(&found->entry);
2274 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2275 kfree(found);
2276 }
2277 }
2278
2279 return err;
2280}
2281
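/* Drop one reference on the flow entry matching @match; the hardware
 * delete is only issued once the last reference is gone.  The caller's
 * @match copy is always freed here.
 */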
2282static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2283 struct rocker_flow_tbl_entry *match,
2284 bool nowait)
2285{
2286 struct rocker *rocker = rocker_port->rocker;
2287 struct rocker_flow_tbl_entry *found;
2288 unsigned long flags;
2289 bool del_from_hw = false;
2290 int err = 0;
2291
2292 match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
2293
2294 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2295
2296 found = rocker_flow_tbl_find(rocker, match);
2297
2298 if (found) {
2299 found->ref_count--;
2300 if (found->ref_count == 0) {
2301 hash_del(&found->entry);
2302 del_from_hw = true;
2303 }
2304 }
2305
2306 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2307
2308 kfree(match);
2309
2310 if (del_from_hw) {
2311 err = rocker_cmd_exec(rocker, rocker_port,
2312 rocker_cmd_flow_tbl_del,
2313 found, NULL, NULL, nowait);
2314 kfree(found);
2315 }
2316
2317 return err;
2318}
2319
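/* Callers passing ROCKER_OP_FLAG_NOWAIT may be running in atomic
 * context, so their allocations must not sleep.
 */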
2320static gfp_t rocker_op_flags_gfp(int flags)
2321{
2322 return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
2323}
2324
2325static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2326 int flags, struct rocker_flow_tbl_entry *entry)
2327{
2328 bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2329
2330 if (flags & ROCKER_OP_FLAG_REMOVE)
2331 return rocker_flow_tbl_del(rocker_port, entry, nowait);
2332 else
2333 return rocker_flow_tbl_add(rocker_port, entry, nowait);
2334}
2335
2336static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002337 int flags, u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002338 enum rocker_of_dpa_table_id goto_tbl)
2339{
2340 struct rocker_flow_tbl_entry *entry;
2341
2342 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2343 if (!entry)
2344 return -ENOMEM;
2345
2346 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2347 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002348 entry->key.ig_port.in_pport = in_pport;
2349 entry->key.ig_port.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002350 entry->key.ig_port.goto_tbl = goto_tbl;
2351
2352 return rocker_flow_tbl_do(rocker_port, flags, entry);
2353}
2354
2355static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002356 int flags, u32 in_pport,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002357 __be16 vlan_id, __be16 vlan_id_mask,
2358 enum rocker_of_dpa_table_id goto_tbl,
2359 bool untagged, __be16 new_vlan_id)
2360{
2361 struct rocker_flow_tbl_entry *entry;
2362
2363 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2364 if (!entry)
2365 return -ENOMEM;
2366
2367 entry->key.priority = ROCKER_PRIORITY_VLAN;
2368 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002369 entry->key.vlan.in_pport = in_pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002370 entry->key.vlan.vlan_id = vlan_id;
2371 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2372 entry->key.vlan.goto_tbl = goto_tbl;
2373
2374 entry->key.vlan.untagged = untagged;
2375 entry->key.vlan.new_vlan_id = new_vlan_id;
2376
2377 return rocker_flow_tbl_do(rocker_port, flags, entry);
2378}
2379
2380static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002381 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002382 __be16 eth_type, const u8 *eth_dst,
2383 const u8 *eth_dst_mask, __be16 vlan_id,
2384 __be16 vlan_id_mask, bool copy_to_cpu,
2385 int flags)
2386{
2387 struct rocker_flow_tbl_entry *entry;
2388
2389 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2390 if (!entry)
2391 return -ENOMEM;
2392
2393 if (is_multicast_ether_addr(eth_dst)) {
2394 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2395 entry->key.term_mac.goto_tbl =
2396 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2397 } else {
2398 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2399 entry->key.term_mac.goto_tbl =
2400 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2401 }
2402
2403 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002404 entry->key.term_mac.in_pport = in_pport;
2405 entry->key.term_mac.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002406 entry->key.term_mac.eth_type = eth_type;
2407 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2408 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2409 entry->key.term_mac.vlan_id = vlan_id;
2410 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2411 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2412
2413 return rocker_flow_tbl_do(rocker_port, flags, entry);
2414}
2415
2416static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2417 int flags,
2418 const u8 *eth_dst, const u8 *eth_dst_mask,
2419 __be16 vlan_id, u32 tunnel_id,
2420 enum rocker_of_dpa_table_id goto_tbl,
2421 u32 group_id, bool copy_to_cpu)
2422{
2423 struct rocker_flow_tbl_entry *entry;
2424 u32 priority;
2425 bool vlan_bridging = !!vlan_id;
2426	bool dflt = !eth_dst || eth_dst_mask;
2427 bool wild = false;
2428
2429 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2430 if (!entry)
2431 return -ENOMEM;
2432
2433 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2434
2435 if (eth_dst) {
2436 entry->key.bridge.has_eth_dst = 1;
2437 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2438 }
2439 if (eth_dst_mask) {
2440 entry->key.bridge.has_eth_dst_mask = 1;
2441 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2442 if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
2443 wild = true;
2444 }
2445
2446 priority = ROCKER_PRIORITY_UNKNOWN;
Thomas Graf51ace882014-11-28 14:34:32 +01002447 if (vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002448 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002449 else if (vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002450 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002451 else if (vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002452 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
Thomas Graf51ace882014-11-28 14:34:32 +01002453 else if (!vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002454 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002455 else if (!vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002456 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002457 else if (!vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002458 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2459
2460 entry->key.priority = priority;
2461 entry->key.bridge.vlan_id = vlan_id;
2462 entry->key.bridge.tunnel_id = tunnel_id;
2463 entry->key.bridge.goto_tbl = goto_tbl;
2464 entry->key.bridge.group_id = group_id;
2465 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2466
2467 return rocker_flow_tbl_do(rocker_port, flags, entry);
2468}
2469
2470static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002471 int flags, u32 in_pport,
2472 u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002473 const u8 *eth_src, const u8 *eth_src_mask,
2474 const u8 *eth_dst, const u8 *eth_dst_mask,
2475 __be16 eth_type,
2476 __be16 vlan_id, __be16 vlan_id_mask,
2477 u8 ip_proto, u8 ip_proto_mask,
2478 u8 ip_tos, u8 ip_tos_mask,
2479 u32 group_id)
2480{
2481 u32 priority;
2482 struct rocker_flow_tbl_entry *entry;
2483
2484 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2485 if (!entry)
2486 return -ENOMEM;
2487
2488 priority = ROCKER_PRIORITY_ACL_NORMAL;
2489 if (eth_dst && eth_dst_mask) {
2490 if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
2491 priority = ROCKER_PRIORITY_ACL_DFLT;
2492 else if (is_link_local_ether_addr(eth_dst))
2493 priority = ROCKER_PRIORITY_ACL_CTRL;
2494 }
2495
2496 entry->key.priority = priority;
2497 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002498 entry->key.acl.in_pport = in_pport;
2499 entry->key.acl.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002500
2501 if (eth_src)
2502 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2503 if (eth_src_mask)
2504 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2505 if (eth_dst)
2506 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2507 if (eth_dst_mask)
2508 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2509
2510 entry->key.acl.eth_type = eth_type;
2511 entry->key.acl.vlan_id = vlan_id;
2512 entry->key.acl.vlan_id_mask = vlan_id_mask;
2513 entry->key.acl.ip_proto = ip_proto;
2514 entry->key.acl.ip_proto_mask = ip_proto_mask;
2515 entry->key.acl.ip_tos = ip_tos;
2516 entry->key.acl.ip_tos_mask = ip_tos_mask;
2517 entry->key.acl.group_id = group_id;
2518
2519 return rocker_flow_tbl_do(rocker_port, flags, entry);
2520}
2521
2522static struct rocker_group_tbl_entry *
2523rocker_group_tbl_find(struct rocker *rocker,
2524 struct rocker_group_tbl_entry *match)
2525{
2526 struct rocker_group_tbl_entry *found;
2527
2528 hash_for_each_possible(rocker->group_tbl, found,
2529 entry, match->group_id) {
2530 if (found->group_id == match->group_id)
2531 return found;
2532 }
2533
2534 return NULL;
2535}
2536
2537static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
2538{
2539 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2540 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2541 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2542 kfree(entry->group_ids);
2543 break;
2544 default:
2545 break;
2546 }
2547 kfree(entry);
2548}
2549
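/* Add or replace a group table entry.  If an entry with the same
 * group_id already exists it is replaced, and the command sent to the
 * device becomes a GROUP_MOD instead of a GROUP_ADD.
 */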
2550static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2551 struct rocker_group_tbl_entry *match,
2552 bool nowait)
2553{
2554 struct rocker *rocker = rocker_port->rocker;
2555 struct rocker_group_tbl_entry *found;
2556 unsigned long flags;
2557 int err = 0;
2558
2559 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2560
2561 found = rocker_group_tbl_find(rocker, match);
2562
2563 if (found) {
2564 hash_del(&found->entry);
2565 rocker_group_tbl_entry_free(found);
2566 found = match;
2567 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2568 } else {
2569 found = match;
2570 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2571 }
2572
2573 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2574
2575 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2576
2577 if (found->cmd)
2578 err = rocker_cmd_exec(rocker, rocker_port,
2579 rocker_cmd_group_tbl_add,
2580 found, NULL, NULL, nowait);
2581
2582 return err;
2583}
2584
2585static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2586 struct rocker_group_tbl_entry *match,
2587 bool nowait)
2588{
2589 struct rocker *rocker = rocker_port->rocker;
2590 struct rocker_group_tbl_entry *found;
2591 unsigned long flags;
2592 int err = 0;
2593
2594 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2595
2596 found = rocker_group_tbl_find(rocker, match);
2597
2598 if (found) {
2599 hash_del(&found->entry);
2600 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2601 }
2602
2603 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2604
2605 rocker_group_tbl_entry_free(match);
2606
2607 if (found) {
2608 err = rocker_cmd_exec(rocker, rocker_port,
2609 rocker_cmd_group_tbl_del,
2610 found, NULL, NULL, nowait);
2611 rocker_group_tbl_entry_free(found);
2612 }
2613
2614 return err;
2615}
2616
2617static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2618 int flags, struct rocker_group_tbl_entry *entry)
2619{
2620 bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2621
2622 if (flags & ROCKER_OP_FLAG_REMOVE)
2623 return rocker_group_tbl_del(rocker_port, entry, nowait);
2624 else
2625 return rocker_group_tbl_add(rocker_port, entry, nowait);
2626}
2627
2628static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2629 int flags, __be16 vlan_id,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002630 u32 out_pport, int pop_vlan)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002631{
2632 struct rocker_group_tbl_entry *entry;
2633
2634 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2635 if (!entry)
2636 return -ENOMEM;
2637
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002638 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002639 entry->l2_interface.pop_vlan = pop_vlan;
2640
2641 return rocker_group_tbl_do(rocker_port, flags, entry);
2642}
2643
2644static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2645 int flags, u8 group_count,
2646 u32 *group_ids, u32 group_id)
2647{
2648 struct rocker_group_tbl_entry *entry;
2649
2650 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2651 if (!entry)
2652 return -ENOMEM;
2653
2654 entry->group_id = group_id;
2655 entry->group_count = group_count;
2656
2657 entry->group_ids = kcalloc(group_count, sizeof(u32),
2658 rocker_op_flags_gfp(flags));
2659 if (!entry->group_ids) {
2660 kfree(entry);
2661 return -ENOMEM;
2662 }
2663 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2664
2665 return rocker_group_tbl_do(rocker_port, flags, entry);
2666}
2667
2668static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2669 int flags, __be16 vlan_id,
2670 u8 group_count, u32 *group_ids,
2671 u32 group_id)
2672{
2673 return rocker_group_l2_fan_out(rocker_port, flags,
2674 group_count, group_ids,
2675 group_id);
2676}
2677
Scott Feldman6c707942014-11-28 14:34:28 +01002678static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
2679 int flags, __be16 vlan_id)
2680{
2681 struct rocker_port *p;
2682 struct rocker *rocker = rocker_port->rocker;
2683 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
2684 u32 group_ids[rocker->port_count];
2685 u8 group_count = 0;
2686 int err;
2687 int i;
2688
2689 /* Adjust the flood group for this VLAN. The flood group
2690 * references an L2 interface group for each port in this
2691 * VLAN.
2692 */
2693
2694 for (i = 0; i < rocker->port_count; i++) {
2695 p = rocker->ports[i];
2696 if (!rocker_port_is_bridged(p))
2697 continue;
2698 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
2699 group_ids[group_count++] =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002700 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
Scott Feldman6c707942014-11-28 14:34:28 +01002701 }
2702 }
2703
2704 /* If there are no bridged ports in this VLAN, we're done */
2705 if (group_count == 0)
2706 return 0;
2707
2708 err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
2709 group_count, group_ids,
2710 group_id);
2711 if (err)
2712 netdev_err(rocker_port->dev,
2713 "Error (%d) port VLAN l2 flood group\n", err);
2714
2715 return err;
2716}
2717
2718static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
2719 int flags, __be16 vlan_id,
2720 bool pop_vlan)
2721{
2722 struct rocker *rocker = rocker_port->rocker;
2723 struct rocker_port *p;
2724 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002725 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01002726 int ref = 0;
2727 int err;
2728 int i;
2729
2730 /* An L2 interface group for this port in this VLAN, but
2731 * only when port STP state is LEARNING|FORWARDING.
2732 */
2733
2734 if (rocker_port->stp_state == BR_STATE_LEARNING ||
2735 rocker_port->stp_state == BR_STATE_FORWARDING) {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002736 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01002737 err = rocker_group_l2_interface(rocker_port, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002738 vlan_id, out_pport,
Scott Feldman6c707942014-11-28 14:34:28 +01002739 pop_vlan);
2740 if (err) {
2741 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002742 "Error (%d) port VLAN l2 group for pport %d\n",
2743 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01002744 return err;
2745 }
2746 }
2747
2748 /* An L2 interface group for this VLAN to CPU port.
2749 * Add when first port joins this VLAN and destroy when
2750 * last port leaves this VLAN.
2751 */
2752
2753 for (i = 0; i < rocker->port_count; i++) {
2754 p = rocker->ports[i];
2755 if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
2756 ref++;
2757 }
2758
2759 if ((!adding || ref != 1) && (adding || ref != 0))
2760 return 0;
2761
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002762 out_pport = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01002763 err = rocker_group_l2_interface(rocker_port, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002764 vlan_id, out_pport,
Scott Feldman6c707942014-11-28 14:34:28 +01002765 pop_vlan);
2766 if (err) {
2767 netdev_err(rocker_port->dev,
2768 "Error (%d) port VLAN l2 group for CPU port\n", err);
2769 return err;
2770 }
2771
2772 return 0;
2773}
2774
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002775static struct rocker_ctrl {
2776 const u8 *eth_dst;
2777 const u8 *eth_dst_mask;
Jiri Pirko11e6c652014-12-03 14:14:54 +01002778 __be16 eth_type;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002779 bool acl;
2780 bool bridge;
2781 bool term;
2782 bool copy_to_cpu;
2783} rocker_ctrls[] = {
2784 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
2785 /* pass link local multicast pkts up to CPU for filtering */
2786 .eth_dst = ll_mac,
2787 .eth_dst_mask = ll_mask,
2788 .acl = true,
2789 },
2790 [ROCKER_CTRL_LOCAL_ARP] = {
2791 /* pass local ARP pkts up to CPU */
2792 .eth_dst = zero_mac,
2793 .eth_dst_mask = zero_mac,
2794 .eth_type = htons(ETH_P_ARP),
2795 .acl = true,
2796 },
2797 [ROCKER_CTRL_IPV4_MCAST] = {
2798 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
2799 .eth_dst = ipv4_mcast,
2800 .eth_dst_mask = ipv4_mask,
2801 .eth_type = htons(ETH_P_IP),
2802 .term = true,
2803 .copy_to_cpu = true,
2804 },
2805 [ROCKER_CTRL_IPV6_MCAST] = {
2806 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
2807 .eth_dst = ipv6_mcast,
2808 .eth_dst_mask = ipv6_mask,
2809 .eth_type = htons(ETH_P_IPV6),
2810 .term = true,
2811 .copy_to_cpu = true,
2812 },
2813 [ROCKER_CTRL_DFLT_BRIDGING] = {
2814 /* flood any pkts on vlan */
2815 .bridge = true,
2816 .copy_to_cpu = true,
2817 },
2818};
2819
2820static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
2821 int flags, struct rocker_ctrl *ctrl,
2822 __be16 vlan_id)
2823{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002824 u32 in_pport = rocker_port->pport;
2825 u32 in_pport_mask = 0xffffffff;
2826 u32 out_pport = 0;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002827 u8 *eth_src = NULL;
2828 u8 *eth_src_mask = NULL;
2829 __be16 vlan_id_mask = htons(0xffff);
2830 u8 ip_proto = 0;
2831 u8 ip_proto_mask = 0;
2832 u8 ip_tos = 0;
2833 u8 ip_tos_mask = 0;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002834 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002835 int err;
2836
2837 err = rocker_flow_tbl_acl(rocker_port, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002838 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002839 eth_src, eth_src_mask,
2840 ctrl->eth_dst, ctrl->eth_dst_mask,
2841 ctrl->eth_type,
2842 vlan_id, vlan_id_mask,
2843 ip_proto, ip_proto_mask,
2844 ip_tos, ip_tos_mask,
2845 group_id);
2846
2847 if (err)
2848 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
2849
2850 return err;
2851}
2852
Scott Feldman6c707942014-11-28 14:34:28 +01002853static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
2854 int flags, struct rocker_ctrl *ctrl,
2855 __be16 vlan_id)
2856{
2857 enum rocker_of_dpa_table_id goto_tbl =
2858 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2859 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
2860 u32 tunnel_id = 0;
2861 int err;
2862
2863 if (!rocker_port_is_bridged(rocker_port))
2864 return 0;
2865
2866 err = rocker_flow_tbl_bridge(rocker_port, flags,
2867 ctrl->eth_dst, ctrl->eth_dst_mask,
2868 vlan_id, tunnel_id,
2869 goto_tbl, group_id, ctrl->copy_to_cpu);
2870
2871 if (err)
2872 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
2873
2874 return err;
2875}
2876
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002877static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
2878 int flags, struct rocker_ctrl *ctrl,
2879 __be16 vlan_id)
2880{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002881 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002882 __be16 vlan_id_mask = htons(0xffff);
2883 int err;
2884
2885 if (ntohs(vlan_id) == 0)
2886 vlan_id = rocker_port->internal_vlan_id;
2887
2888 err = rocker_flow_tbl_term_mac(rocker_port,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002889 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002890 ctrl->eth_type, ctrl->eth_dst,
2891 ctrl->eth_dst_mask, vlan_id,
2892 vlan_id_mask, ctrl->copy_to_cpu,
2893 flags);
2894
2895 if (err)
2896 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
2897
2898 return err;
2899}
2900
2901static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
2902 struct rocker_ctrl *ctrl, __be16 vlan_id)
2903{
2904 if (ctrl->acl)
2905 return rocker_port_ctrl_vlan_acl(rocker_port, flags,
2906 ctrl, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01002907 if (ctrl->bridge)
2908 return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
2909 ctrl, vlan_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002910
2911 if (ctrl->term)
2912 return rocker_port_ctrl_vlan_term(rocker_port, flags,
2913 ctrl, vlan_id);
2914
2915 return -EOPNOTSUPP;
2916}
2917
2918static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
2919 int flags, __be16 vlan_id)
2920{
2921 int err = 0;
2922 int i;
2923
2924 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
2925 if (rocker_port->ctrls[i]) {
2926 err = rocker_port_ctrl_vlan(rocker_port, flags,
2927 &rocker_ctrls[i], vlan_id);
2928 if (err)
2929 return err;
2930 }
2931 }
2932
2933 return err;
2934}
2935
2936static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
2937 struct rocker_ctrl *ctrl)
2938{
2939 u16 vid;
2940 int err = 0;
2941
2942 for (vid = 1; vid < VLAN_N_VID; vid++) {
2943 if (!test_bit(vid, rocker_port->vlan_bitmap))
2944 continue;
2945 err = rocker_port_ctrl_vlan(rocker_port, flags,
2946 ctrl, htons(vid));
2947 if (err)
2948 break;
2949 }
2950
2951 return err;
2952}
2953
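/* Handle a port VLAN membership change: on add, install the
 * control-traffic entries first, then (for add or remove) update the
 * L2 interface and flood groups and the VLAN table entry translating
 * the wire VLAN to the port's internal VLAN id.
 */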
Scott Feldman6c707942014-11-28 14:34:28 +01002954static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
2955 u16 vid)
2956{
2957 enum rocker_of_dpa_table_id goto_tbl =
2958 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002959 u32 in_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01002960 __be16 vlan_id = htons(vid);
2961 __be16 vlan_id_mask = htons(0xffff);
2962 __be16 internal_vlan_id;
2963 bool untagged;
2964 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2965 int err;
2966
2967 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
2968
2969 if (adding && test_and_set_bit(ntohs(internal_vlan_id),
2970 rocker_port->vlan_bitmap))
2971 return 0; /* already added */
2972 else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
2973 rocker_port->vlan_bitmap))
2974 return 0; /* already removed */
2975
2976 if (adding) {
2977 err = rocker_port_ctrl_vlan_add(rocker_port, flags,
2978 internal_vlan_id);
2979 if (err) {
2980 netdev_err(rocker_port->dev,
2981 "Error (%d) port ctrl vlan add\n", err);
2982 return err;
2983 }
2984 }
2985
2986 err = rocker_port_vlan_l2_groups(rocker_port, flags,
2987 internal_vlan_id, untagged);
2988 if (err) {
2989 netdev_err(rocker_port->dev,
2990 "Error (%d) port VLAN l2 groups\n", err);
2991 return err;
2992 }
2993
2994 err = rocker_port_vlan_flood_group(rocker_port, flags,
2995 internal_vlan_id);
2996 if (err) {
2997 netdev_err(rocker_port->dev,
2998 "Error (%d) port VLAN l2 flood group\n", err);
2999 return err;
3000 }
3001
3002 err = rocker_flow_tbl_vlan(rocker_port, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003003 in_pport, vlan_id, vlan_id_mask,
Scott Feldman6c707942014-11-28 14:34:28 +01003004 goto_tbl, untagged, internal_vlan_id);
3005 if (err)
3006 netdev_err(rocker_port->dev,
3007 "Error (%d) port VLAN table\n", err);
3008
3009 return err;
3010}
3011
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003012static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
3013{
3014 enum rocker_of_dpa_table_id goto_tbl;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003015 u32 in_pport;
3016 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003017 int err;
3018
3019 /* Normal Ethernet Frames. Matches pkts from any local physical
3020 * ports. Goto VLAN tbl.
3021 */
3022
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003023 in_pport = 0;
3024 in_pport_mask = 0xffff0000;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003025 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3026
3027 err = rocker_flow_tbl_ig_port(rocker_port, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003028 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003029 goto_tbl);
3030 if (err)
3031 netdev_err(rocker_port->dev,
3032 "Error (%d) ingress port table entry\n", err);
3033
3034 return err;
3035}
3036
Scott Feldman6c707942014-11-28 14:34:28 +01003037struct rocker_fdb_learn_work {
3038 struct work_struct work;
3039 struct net_device *dev;
3040 int flags;
3041 u8 addr[ETH_ALEN];
3042 u16 vid;
3043};
3044
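/* Deferred work so that the switchdev FDB add/del notifiers run in
 * process context instead of the context in which the address was
 * learned or aged out.
 */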
3045static void rocker_port_fdb_learn_work(struct work_struct *work)
3046{
3047 struct rocker_fdb_learn_work *lw =
3048 container_of(work, struct rocker_fdb_learn_work, work);
3049 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3050 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
Jiri Pirko3aeb6612015-01-15 23:49:37 +01003051 struct netdev_switch_notifier_fdb_info info;
3052
3053 info.addr = lw->addr;
3054 info.vid = lw->vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003055
Thomas Graf51ace882014-11-28 14:34:32 +01003056 if (learned && removing)
Jiri Pirko3aeb6612015-01-15 23:49:37 +01003057 call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
3058 lw->dev, &info.info);
Thomas Graf51ace882014-11-28 14:34:32 +01003059 else if (learned && !removing)
Jiri Pirko3aeb6612015-01-15 23:49:37 +01003060 call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
3061 lw->dev, &info.info);
Scott Feldman6c707942014-11-28 14:34:28 +01003062
3063 kfree(work);
3064}
3065
3066static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3067 int flags, const u8 *addr, __be16 vlan_id)
3068{
3069 struct rocker_fdb_learn_work *lw;
3070 enum rocker_of_dpa_table_id goto_tbl =
3071 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003072 u32 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003073 u32 tunnel_id = 0;
3074 u32 group_id = ROCKER_GROUP_NONE;
Scott Feldman5111f802014-11-28 14:34:30 +01003075 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
Scott Feldman6c707942014-11-28 14:34:28 +01003076 bool copy_to_cpu = false;
3077 int err;
3078
3079 if (rocker_port_is_bridged(rocker_port))
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003080 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003081
3082 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3083 err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
3084 vlan_id, tunnel_id, goto_tbl,
3085 group_id, copy_to_cpu);
3086 if (err)
3087 return err;
3088 }
3089
Scott Feldman5111f802014-11-28 14:34:30 +01003090 if (!syncing)
3091 return 0;
3092
Scott Feldman6c707942014-11-28 14:34:28 +01003093 if (!rocker_port_is_bridged(rocker_port))
3094 return 0;
3095
3096 lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
3097 if (!lw)
3098 return -ENOMEM;
3099
3100 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3101
3102 lw->dev = rocker_port->dev;
3103 lw->flags = flags;
3104 ether_addr_copy(lw->addr, addr);
3105 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3106
3107 schedule_work(&lw->work);
3108
3109 return 0;
3110}
3111
3112static struct rocker_fdb_tbl_entry *
3113rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
3114{
3115 struct rocker_fdb_tbl_entry *found;
3116
3117 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3118 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3119 return found;
3120
3121 return NULL;
3122}
3123
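/* Update the software FDB for @addr on @vlan_id and program the
 * matching bridging flow.  Adding an address that is already known
 * only refreshes the hardware entry (restarting its aging), and
 * removing an unknown address is a no-op.
 */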
3124static int rocker_port_fdb(struct rocker_port *rocker_port,
3125 const unsigned char *addr,
3126 __be16 vlan_id, int flags)
3127{
3128 struct rocker *rocker = rocker_port->rocker;
3129 struct rocker_fdb_tbl_entry *fdb;
3130 struct rocker_fdb_tbl_entry *found;
3131 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3132 unsigned long lock_flags;
3133
3134 fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
3135 if (!fdb)
3136 return -ENOMEM;
3137
3138 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003139 fdb->key.pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003140 ether_addr_copy(fdb->key.addr, addr);
3141 fdb->key.vlan_id = vlan_id;
3142 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3143
3144 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3145
3146 found = rocker_fdb_tbl_find(rocker, fdb);
3147
3148 if (removing && found) {
3149 kfree(fdb);
3150 hash_del(&found->entry);
3151 } else if (!removing && !found) {
3152 hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
3153 }
3154
3155 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3156
3157 /* Check if adding and already exists, or removing and can't find */
3158 if (!found != !removing) {
3159 kfree(fdb);
3160 if (!found && removing)
3161 return 0;
3162 /* Refreshing existing to update aging timers */
3163 flags |= ROCKER_OP_FLAG_REFRESH;
3164 }
3165
3166 return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
3167}
3168
3169static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
3170{
3171 struct rocker *rocker = rocker_port->rocker;
3172 struct rocker_fdb_tbl_entry *found;
3173 unsigned long lock_flags;
3174 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3175 struct hlist_node *tmp;
3176 int bkt;
3177 int err = 0;
3178
3179 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3180 rocker_port->stp_state == BR_STATE_FORWARDING)
3181 return 0;
3182
3183 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3184
3185 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003186 if (found->key.pport != rocker_port->pport)
Scott Feldman6c707942014-11-28 14:34:28 +01003187 continue;
3188 if (!found->learned)
3189 continue;
3190 err = rocker_port_fdb_learn(rocker_port, flags,
3191 found->key.addr,
3192 found->key.vlan_id);
3193 if (err)
3194 goto err_out;
3195 hash_del(&found->entry);
3196 }
3197
3198err_out:
3199 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3200
3201 return err;
3202}
3203
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003204static int rocker_port_router_mac(struct rocker_port *rocker_port,
3205 int flags, __be16 vlan_id)
3206{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003207 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003208 __be16 eth_type;
3209 const u8 *dst_mac_mask = ff_mac;
3210 __be16 vlan_id_mask = htons(0xffff);
3211 bool copy_to_cpu = false;
3212 int err;
3213
3214 if (ntohs(vlan_id) == 0)
3215 vlan_id = rocker_port->internal_vlan_id;
3216
3217 eth_type = htons(ETH_P_IP);
3218 err = rocker_flow_tbl_term_mac(rocker_port,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003219 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003220 eth_type, rocker_port->dev->dev_addr,
3221 dst_mac_mask, vlan_id, vlan_id_mask,
3222 copy_to_cpu, flags);
3223 if (err)
3224 return err;
3225
3226 eth_type = htons(ETH_P_IPV6);
3227 err = rocker_flow_tbl_term_mac(rocker_port,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003228 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003229 eth_type, rocker_port->dev->dev_addr,
3230 dst_mac_mask, vlan_id, vlan_id_mask,
3231 copy_to_cpu, flags);
3232
3233 return err;
3234}
3235
Scott Feldman6c707942014-11-28 14:34:28 +01003236static int rocker_port_fwding(struct rocker_port *rocker_port)
3237{
3238 bool pop_vlan;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003239 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003240 __be16 vlan_id;
3241 u16 vid;
3242 int flags = ROCKER_OP_FLAG_NOWAIT;
3243 int err;
3244
3245 /* Port will be forwarding-enabled if its STP state is LEARNING
3246 * or FORWARDING. Traffic from CPU can still egress, regardless of
3247 * port STP state. Use L2 interface group on port VLANs as a way
3248 * to toggle port forwarding: if forwarding is disabled, L2
3249 * interface group will not exist.
3250 */
3251
3252 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3253 rocker_port->stp_state != BR_STATE_FORWARDING)
3254 flags |= ROCKER_OP_FLAG_REMOVE;
3255
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003256 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003257 for (vid = 1; vid < VLAN_N_VID; vid++) {
3258 if (!test_bit(vid, rocker_port->vlan_bitmap))
3259 continue;
3260 vlan_id = htons(vid);
3261 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3262 err = rocker_group_l2_interface(rocker_port, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003263 vlan_id, out_pport,
Scott Feldman6c707942014-11-28 14:34:28 +01003264 pop_vlan);
3265 if (err) {
3266 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003267 "Error (%d) port VLAN l2 group for pport %d\n",
3268 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003269 return err;
3270 }
3271 }
3272
3273 return 0;
3274}
3275
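/* React to an STP state change: install or remove the control-traffic
 * entries the new state calls for, flush learned FDB entries when the
 * port leaves the LEARNING/FORWARDING states, and update the L2
 * interface groups that gate forwarding.
 */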
3276static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
3277{
3278 bool want[ROCKER_CTRL_MAX] = { 0, };
3279 int flags;
3280 int err;
3281 int i;
3282
3283 if (rocker_port->stp_state == state)
3284 return 0;
3285
3286 rocker_port->stp_state = state;
3287
3288 switch (state) {
3289 case BR_STATE_DISABLED:
3290 /* port is completely disabled */
3291 break;
3292 case BR_STATE_LISTENING:
3293 case BR_STATE_BLOCKING:
3294 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3295 break;
3296 case BR_STATE_LEARNING:
3297 case BR_STATE_FORWARDING:
3298 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3299 want[ROCKER_CTRL_IPV4_MCAST] = true;
3300 want[ROCKER_CTRL_IPV6_MCAST] = true;
3301 if (rocker_port_is_bridged(rocker_port))
3302 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3303 else
3304 want[ROCKER_CTRL_LOCAL_ARP] = true;
3305 break;
3306 }
3307
3308 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3309 if (want[i] != rocker_port->ctrls[i]) {
3310 flags = ROCKER_OP_FLAG_NOWAIT |
3311 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3312 err = rocker_port_ctrl(rocker_port, flags,
3313 &rocker_ctrls[i]);
3314 if (err)
3315 return err;
3316 rocker_port->ctrls[i] = want[i];
3317 }
3318 }
3319
3320 err = rocker_port_fdb_flush(rocker_port);
3321 if (err)
3322 return err;
3323
3324 return rocker_port_fwding(rocker_port);
3325}
3326
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003327static struct rocker_internal_vlan_tbl_entry *
3328rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
3329{
3330 struct rocker_internal_vlan_tbl_entry *found;
3331
3332 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3333 entry, ifindex) {
3334 if (found->ifindex == ifindex)
3335 return found;
3336 }
3337
3338 return NULL;
3339}
3340
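/* Return the internal VLAN id associated with @ifindex, allocating one
 * from the bitmap (based at ROCKER_INTERNAL_VLAN_ID_BASE) on first use.
 * Entries are reference counted so repeated gets for the same ifindex
 * share the same id.
 */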
3341static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3342 int ifindex)
3343{
3344 struct rocker *rocker = rocker_port->rocker;
3345 struct rocker_internal_vlan_tbl_entry *entry;
3346 struct rocker_internal_vlan_tbl_entry *found;
3347 unsigned long lock_flags;
3348 int i;
3349
3350 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3351 if (!entry)
3352 return 0;
3353
3354 entry->ifindex = ifindex;
3355
3356 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3357
3358 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3359 if (found) {
3360 kfree(entry);
3361 goto found;
3362 }
3363
3364 found = entry;
3365 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3366
3367 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3368 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3369 continue;
3370 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3371 goto found;
3372 }
3373
3374 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3375
3376found:
3377 found->ref_count++;
3378 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3379
3380 return found->vlan_id;
3381}
3382
3383static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
3384 int ifindex)
3385{
3386 struct rocker *rocker = rocker_port->rocker;
3387 struct rocker_internal_vlan_tbl_entry *found;
3388 unsigned long lock_flags;
3389 unsigned long bit;
3390
3391 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3392
3393 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3394 if (!found) {
3395 netdev_err(rocker_port->dev,
3396 "ifindex (%d) not found in internal VLAN tbl\n",
3397 ifindex);
3398 goto not_found;
3399 }
3400
3401 if (--found->ref_count <= 0) {
3402 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3403 clear_bit(bit, rocker->internal_vlan_bitmap);
3404 hash_del(&found->entry);
3405 kfree(found);
3406 }
3407
3408not_found:
3409 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3410}
3411
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003412/*****************
3413 * Net device ops
3414 *****************/
3415
3416static int rocker_port_open(struct net_device *dev)
3417{
3418 struct rocker_port *rocker_port = netdev_priv(dev);
Scott Feldman6c707942014-11-28 14:34:28 +01003419 u8 stp_state = rocker_port_is_bridged(rocker_port) ?
3420 BR_STATE_BLOCKING : BR_STATE_FORWARDING;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003421 int err;
3422
3423 err = rocker_port_dma_rings_init(rocker_port);
3424 if (err)
3425 return err;
3426
3427 err = request_irq(rocker_msix_tx_vector(rocker_port),
3428 rocker_tx_irq_handler, 0,
3429 rocker_driver_name, rocker_port);
3430 if (err) {
3431 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
3432 goto err_request_tx_irq;
3433 }
3434
3435 err = request_irq(rocker_msix_rx_vector(rocker_port),
3436 rocker_rx_irq_handler, 0,
3437 rocker_driver_name, rocker_port);
3438 if (err) {
3439 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
3440 goto err_request_rx_irq;
3441 }
3442
Scott Feldman6c707942014-11-28 14:34:28 +01003443 err = rocker_port_stp_update(rocker_port, stp_state);
3444 if (err)
3445 goto err_stp_update;
3446
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003447 napi_enable(&rocker_port->napi_tx);
3448 napi_enable(&rocker_port->napi_rx);
3449 rocker_port_set_enable(rocker_port, true);
3450 netif_start_queue(dev);
3451 return 0;
3452
Scott Feldman6c707942014-11-28 14:34:28 +01003453err_stp_update:
3454 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003455err_request_rx_irq:
3456 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3457err_request_tx_irq:
3458 rocker_port_dma_rings_fini(rocker_port);
3459 return err;
3460}
3461
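/* ndo_stop: undo rocker_port_open() in reverse order - stop the queue,
 * disable the port and NAPI, move the port to BR_STATE_DISABLED, free the
 * MSI-X vectors and tear down the DMA rings.
 */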
3462static int rocker_port_stop(struct net_device *dev)
3463{
3464 struct rocker_port *rocker_port = netdev_priv(dev);
3465
3466 netif_stop_queue(dev);
3467 rocker_port_set_enable(rocker_port, false);
3468 napi_disable(&rocker_port->napi_rx);
3469 napi_disable(&rocker_port->napi_tx);
Scott Feldman6c707942014-11-28 14:34:28 +01003470 rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003471 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3472 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3473 rocker_port_dma_rings_fini(rocker_port);
3474
3475 return 0;
3476}
3477
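/* Walk the ROCKER_TLV_TX_FRAGS nest of a tx descriptor and DMA-unmap every
 * fragment that rocker_tx_desc_frag_map_put() mapped for it.
 */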
3478static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
3479 struct rocker_desc_info *desc_info)
3480{
3481 struct rocker *rocker = rocker_port->rocker;
3482 struct pci_dev *pdev = rocker->pdev;
3483 struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
3484 struct rocker_tlv *attr;
3485 int rem;
3486
3487 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
3488 if (!attrs[ROCKER_TLV_TX_FRAGS])
3489 return;
3490 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
3491 struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
3492 dma_addr_t dma_handle;
3493 size_t len;
3494
3495 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
3496 continue;
3497 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
3498 attr);
3499 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
3500 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
3501 continue;
3502 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
3503 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
3504 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
3505 }
3506}
3507
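/* DMA-map one tx buffer and describe it to the device as a nested TLV:
 *
 *   ROCKER_TLV_TX_FRAG (nest)
 *     ROCKER_TLV_TX_FRAG_ATTR_ADDR  u64 DMA address
 *     ROCKER_TLV_TX_FRAG_ATTR_LEN   u16 length
 *
 * If the TLV does not fit in the descriptor, the nest is cancelled and the
 * mapping is undone before returning -EMSGSIZE.
 */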
3508static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
3509 struct rocker_desc_info *desc_info,
3510 char *buf, size_t buf_len)
3511{
3512 struct rocker *rocker = rocker_port->rocker;
3513 struct pci_dev *pdev = rocker->pdev;
3514 dma_addr_t dma_handle;
3515 struct rocker_tlv *frag;
3516
3517 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
3518 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
3519 if (net_ratelimit())
3520 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
3521 return -EIO;
3522 }
3523 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
3524 if (!frag)
3525 goto unmap_frag;
3526 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
3527 dma_handle))
3528 goto nest_cancel;
3529 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
3530 buf_len))
3531 goto nest_cancel;
3532 rocker_tlv_nest_end(desc_info, frag);
3533 return 0;
3534
3535nest_cancel:
3536 rocker_tlv_nest_cancel(desc_info, frag);
3537unmap_frag:
3538 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
3539 return -EMSGSIZE;
3540}
3541
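/* ndo_start_xmit: build a tx descriptor for the skb.  The linear head and
 * each page fragment are mapped as separate TX_FRAG TLVs; the queue is
 * stopped when the ring fills and woken again from the tx completion poll,
 * and any build error drops the skb and bumps tx_dropped.
 */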
3542static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
3543{
3544 struct rocker_port *rocker_port = netdev_priv(dev);
3545 struct rocker *rocker = rocker_port->rocker;
3546 struct rocker_desc_info *desc_info;
3547 struct rocker_tlv *frags;
3548 int i;
3549 int err;
3550
3551 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3552 if (unlikely(!desc_info)) {
3553 if (net_ratelimit())
3554 netdev_err(dev, "tx ring full when queue awake\n");
3555 return NETDEV_TX_BUSY;
3556 }
3557
3558 rocker_desc_cookie_ptr_set(desc_info, skb);
3559
3560 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
3561 if (!frags)
3562 goto out;
3563 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3564 skb->data, skb_headlen(skb));
3565 if (err)
3566 goto nest_cancel;
3567 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
3568 goto nest_cancel;
3569
3570 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3571 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3572
3573 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3574 skb_frag_address(frag),
3575 skb_frag_size(frag));
3576 if (err)
3577 goto unmap_frags;
3578 }
3579 rocker_tlv_nest_end(desc_info, frags);
3580
3581 rocker_desc_gen_clear(desc_info);
3582 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
3583
3584 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3585 if (!desc_info)
3586 netif_stop_queue(dev);
3587
3588 return NETDEV_TX_OK;
3589
3590unmap_frags:
3591 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
3592nest_cancel:
3593 rocker_tlv_nest_cancel(desc_info, frags);
3594out:
3595 dev_kfree_skb(skb);
David Ahernf2bbca52015-01-16 14:22:29 -07003596 dev->stats.tx_dropped++;
3597
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003598 return NETDEV_TX_OK;
3599}
3600
3601static int rocker_port_set_mac_address(struct net_device *dev, void *p)
3602{
3603 struct sockaddr *addr = p;
3604 struct rocker_port *rocker_port = netdev_priv(dev);
3605 int err;
3606
3607 if (!is_valid_ether_addr(addr->sa_data))
3608 return -EADDRNOTAVAIL;
3609
3610 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
3611 if (err)
3612 return err;
3613 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3614 return 0;
3615}
3616
Scott Feldman6c707942014-11-28 14:34:28 +01003617static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
3618 __be16 proto, u16 vid)
3619{
3620 struct rocker_port *rocker_port = netdev_priv(dev);
3621 int err;
3622
3623 err = rocker_port_vlan(rocker_port, 0, vid);
3624 if (err)
3625 return err;
3626
3627 return rocker_port_router_mac(rocker_port, 0, htons(vid));
3628}
3629
3630static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
3631 __be16 proto, u16 vid)
3632{
3633 struct rocker_port *rocker_port = netdev_priv(dev);
3634 int err;
3635
3636 err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
3637 htons(vid));
3638 if (err)
3639 return err;
3640
3641 return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
3642}
3643
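/* ndo_fdb_add/ndo_fdb_del: static FDB entries pushed from user space (for
 * example with the bridge(8) fdb commands).  They are only accepted while
 * the port is enslaved to a bridge and are translated into OF-DPA bridging
 * table updates through rocker_port_fdb().
 */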
3644static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
3645 struct net_device *dev,
3646 const unsigned char *addr, u16 vid,
3647 u16 nlm_flags)
3648{
3649 struct rocker_port *rocker_port = netdev_priv(dev);
3650 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
3651 int flags = 0;
3652
3653 if (!rocker_port_is_bridged(rocker_port))
3654 return -EINVAL;
3655
3656 return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
3657}
3658
3659static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
3660 struct net_device *dev,
3661 const unsigned char *addr, u16 vid)
3662{
3663 struct rocker_port *rocker_port = netdev_priv(dev);
3664 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
3665 int flags = ROCKER_OP_FLAG_REMOVE;
3666
3667 if (!rocker_port_is_bridged(rocker_port))
3668 return -EINVAL;
3669
3670 return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
3671}
3672
Jiri Pirkoce76ca62014-11-28 14:34:29 +01003673static int rocker_fdb_fill_info(struct sk_buff *skb,
3674 struct rocker_port *rocker_port,
3675 const unsigned char *addr, u16 vid,
3676 u32 portid, u32 seq, int type,
3677 unsigned int flags)
3678{
3679 struct nlmsghdr *nlh;
3680 struct ndmsg *ndm;
3681
3682 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
3683 if (!nlh)
3684 return -EMSGSIZE;
3685
3686 ndm = nlmsg_data(nlh);
3687 ndm->ndm_family = AF_BRIDGE;
3688 ndm->ndm_pad1 = 0;
3689 ndm->ndm_pad2 = 0;
3690 ndm->ndm_flags = NTF_SELF;
3691 ndm->ndm_type = 0;
3692 ndm->ndm_ifindex = rocker_port->dev->ifindex;
3693 ndm->ndm_state = NUD_REACHABLE;
3694
3695 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
3696 goto nla_put_failure;
3697
3698 if (vid && nla_put_u16(skb, NDA_VLAN, vid))
3699 goto nla_put_failure;
3700
Johannes Berg053c0952015-01-16 22:09:00 +01003701 nlmsg_end(skb, nlh);
3702 return 0;
Jiri Pirkoce76ca62014-11-28 14:34:29 +01003703
3704nla_put_failure:
3705 nlmsg_cancel(skb, nlh);
3706 return -EMSGSIZE;
3707}
3708
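/* ndo_fdb_dump: walk the software FDB hash under fdb_tbl_lock and emit one
 * RTM_NEWNEIGH record per entry learned on this pport, resuming from the
 * dump offset netlink left in cb->args[0].
 */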
3709static int rocker_port_fdb_dump(struct sk_buff *skb,
3710 struct netlink_callback *cb,
3711 struct net_device *dev,
3712 struct net_device *filter_dev,
3713 int idx)
3714{
3715 struct rocker_port *rocker_port = netdev_priv(dev);
3716 struct rocker *rocker = rocker_port->rocker;
3717 struct rocker_fdb_tbl_entry *found;
3718 struct hlist_node *tmp;
3719 int bkt;
3720 unsigned long lock_flags;
3721 const unsigned char *addr;
3722 u16 vid;
3723 int err;
3724
3725 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3726 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003727 if (found->key.pport != rocker_port->pport)
Jiri Pirkoce76ca62014-11-28 14:34:29 +01003728 continue;
3729 if (idx < cb->args[0])
3730 goto skip;
3731 addr = found->key.addr;
3732 vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
3733 err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
3734 NETLINK_CB(cb->skb).portid,
3735 cb->nlh->nlmsg_seq,
3736 RTM_NEWNEIGH, NLM_F_MULTI);
3737 if (err < 0)
3738 break;
3739skip:
3740 ++idx;
3741 }
3742 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3743 return idx;
3744}
3745
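/* ndo_bridge_setlink/getlink: only IFLA_BRPORT_LEARNING and
 * IFLA_BRPORT_LEARNING_SYNC are honoured.  Learning is pushed down to the
 * device; learning-sync just toggles the cached brport_flags that getlink
 * reports back through ndo_dflt_bridge_getlink().
 */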
Scott Feldman5111f802014-11-28 14:34:30 +01003746static int rocker_port_bridge_setlink(struct net_device *dev,
Roopa Prabhuadd511b2015-01-29 22:40:12 -08003747 struct nlmsghdr *nlh, u16 flags)
Scott Feldman5111f802014-11-28 14:34:30 +01003748{
3749 struct rocker_port *rocker_port = netdev_priv(dev);
3750 struct nlattr *protinfo;
Scott Feldman5111f802014-11-28 14:34:30 +01003751 struct nlattr *attr;
Scott Feldman5111f802014-11-28 14:34:30 +01003752 int err;
3753
3754 protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
3755 IFLA_PROTINFO);
Scott Feldman5111f802014-11-28 14:34:30 +01003756 if (protinfo) {
3757 attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
3758 if (attr) {
Thomas Grafe7560582014-11-28 14:34:31 +01003759 if (nla_len(attr) < sizeof(u8))
3760 return -EINVAL;
3761
Scott Feldman5111f802014-11-28 14:34:30 +01003762 if (nla_get_u8(attr))
3763 rocker_port->brport_flags |= BR_LEARNING;
3764 else
3765 rocker_port->brport_flags &= ~BR_LEARNING;
3766 err = rocker_port_set_learning(rocker_port);
3767 if (err)
3768 return err;
3769 }
3770 attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
3771 if (attr) {
Thomas Grafe7560582014-11-28 14:34:31 +01003772 if (nla_len(attr) < sizeof(u8))
3773 return -EINVAL;
3774
Scott Feldman5111f802014-11-28 14:34:30 +01003775 if (nla_get_u8(attr))
3776 rocker_port->brport_flags |= BR_LEARNING_SYNC;
3777 else
3778 rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
3779 }
3780 }
3781
3782 return 0;
3783}
3784
3785static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3786 struct net_device *dev,
3787 u32 filter_mask)
3788{
3789 struct rocker_port *rocker_port = netdev_priv(dev);
Roopa Prabhu1d460b92014-12-08 14:04:20 -08003790 u16 mode = BRIDGE_MODE_UNDEF;
Scott Feldman5111f802014-11-28 14:34:30 +01003791 u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
3792
3793 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
3794 rocker_port->brport_flags, mask);
3795}
3796
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003797static int rocker_port_switch_parent_id_get(struct net_device *dev,
3798 struct netdev_phys_item_id *psid)
3799{
3800 struct rocker_port *rocker_port = netdev_priv(dev);
3801 struct rocker *rocker = rocker_port->rocker;
3802
3803 psid->id_len = sizeof(rocker->hw.id);
3804 memcpy(&psid->id, &rocker->hw.id, psid->id_len);
3805 return 0;
3806}
3807
Scott Feldman6c707942014-11-28 14:34:28 +01003808static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state)
3809{
3810 struct rocker_port *rocker_port = netdev_priv(dev);
3811
3812 return rocker_port_stp_update(rocker_port, state);
3813}
3814
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003815static const struct net_device_ops rocker_port_netdev_ops = {
3816 .ndo_open = rocker_port_open,
3817 .ndo_stop = rocker_port_stop,
3818 .ndo_start_xmit = rocker_port_xmit,
3819 .ndo_set_mac_address = rocker_port_set_mac_address,
Scott Feldman6c707942014-11-28 14:34:28 +01003820 .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid,
3821 .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid,
3822 .ndo_fdb_add = rocker_port_fdb_add,
3823 .ndo_fdb_del = rocker_port_fdb_del,
Jiri Pirkoce76ca62014-11-28 14:34:29 +01003824 .ndo_fdb_dump = rocker_port_fdb_dump,
Scott Feldman5111f802014-11-28 14:34:30 +01003825 .ndo_bridge_setlink = rocker_port_bridge_setlink,
3826 .ndo_bridge_getlink = rocker_port_bridge_getlink,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003827 .ndo_switch_parent_id_get = rocker_port_switch_parent_id_get,
Scott Feldman6c707942014-11-28 14:34:28 +01003828 .ndo_switch_port_stp_update = rocker_port_switch_port_stp_update,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003829};
3830
3831/********************
3832 * ethtool interface
3833 ********************/
3834
3835static int rocker_port_get_settings(struct net_device *dev,
3836 struct ethtool_cmd *ecmd)
3837{
3838 struct rocker_port *rocker_port = netdev_priv(dev);
3839
3840 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
3841}
3842
3843static int rocker_port_set_settings(struct net_device *dev,
3844 struct ethtool_cmd *ecmd)
3845{
3846 struct rocker_port *rocker_port = netdev_priv(dev);
3847
3848 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
3849}
3850
3851static void rocker_port_get_drvinfo(struct net_device *dev,
3852 struct ethtool_drvinfo *drvinfo)
3853{
3854 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
3855 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
3856}
3857
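/* "ethtool -S" statistics.  Each string names the CMD_PORT_STATS TLV that
 * carries its counter; the values are fetched from the device by
 * rocker_cmd_get_port_stats_ethtool() below.  For example (interface name
 * is illustrative only):
 *
 *   $ ethtool -S sw1p1
 *        rx_packets: ...
 *        tx_packets: ...
 */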
David Ahern9766e972015-01-29 20:59:33 -07003858static struct rocker_port_stats {
3859 char str[ETH_GSTRING_LEN];
3860 int type;
3861} rocker_port_stats[] = {
3862 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
3863 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
3864 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
3865 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
3866
3867 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
3868 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
3869 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
3870 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
3871};
3872
3873#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
3874
3875static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
3876 u8 *data)
3877{
3878 u8 *p = data;
3879 int i;
3880
3881 switch (stringset) {
3882 case ETH_SS_STATS:
3883 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
3884 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
3885 p += ETH_GSTRING_LEN;
3886 }
3887 break;
3888 }
3889}
3890
3891static int
3892rocker_cmd_get_port_stats_prep(struct rocker *rocker,
3893 struct rocker_port *rocker_port,
3894 struct rocker_desc_info *desc_info,
3895 void *priv)
3896{
3897 struct rocker_tlv *cmd_stats;
3898
3899 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
3900 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
3901 return -EMSGSIZE;
3902
3903 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
3904 if (!cmd_stats)
3905 return -EMSGSIZE;
3906
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003907 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
3908 rocker_port->pport))
David Ahern9766e972015-01-29 20:59:33 -07003909 return -EMSGSIZE;
3910
3911 rocker_tlv_nest_end(desc_info, cmd_stats);
3912
3913 return 0;
3914}
3915
3916static int
3917rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
3918 struct rocker_port *rocker_port,
3919 struct rocker_desc_info *desc_info,
3920 void *priv)
3921{
3922 struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
3923 struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
3924 struct rocker_tlv *pattr;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003925 u32 pport;
David Ahern9766e972015-01-29 20:59:33 -07003926 u64 *data = priv;
3927 int i;
3928
3929 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
3930
3931 if (!attrs[ROCKER_TLV_CMD_INFO])
3932 return -EIO;
3933
3934 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
3935 attrs[ROCKER_TLV_CMD_INFO]);
3936
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003937 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
David Ahern9766e972015-01-29 20:59:33 -07003938 return -EIO;
3939
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003940 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
3941 if (pport != rocker_port->pport)
David Ahern9766e972015-01-29 20:59:33 -07003942 return -EIO;
3943
3944 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
3945 pattr = stats_attrs[rocker_port_stats[i].type];
3946 if (!pattr)
3947 continue;
3948
3949 data[i] = rocker_tlv_get_u64(pattr);
3950 }
3951
3952 return 0;
3953}
3954
3955static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
3956 void *priv)
3957{
3958 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
3959 rocker_cmd_get_port_stats_prep, NULL,
3960 rocker_cmd_get_port_stats_ethtool_proc,
3961 priv, false);
3962}
3963
3964static void rocker_port_get_stats(struct net_device *dev,
3965 struct ethtool_stats *stats, u64 *data)
3966{
3967 struct rocker_port *rocker_port = netdev_priv(dev);
3968
3969 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
3970 int i;
3971
3972 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
3973 data[i] = 0;
3974 }
3977}
3978
3979static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
3980{
3981 switch (sset) {
3982 case ETH_SS_STATS:
3983 return ROCKER_PORT_STATS_LEN;
3984 default:
3985 return -EOPNOTSUPP;
3986 }
3987}
3988
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003989static const struct ethtool_ops rocker_port_ethtool_ops = {
3990 .get_settings = rocker_port_get_settings,
3991 .set_settings = rocker_port_set_settings,
3992 .get_drvinfo = rocker_port_get_drvinfo,
3993 .get_link = ethtool_op_get_link,
David Ahern9766e972015-01-29 20:59:33 -07003994 .get_strings = rocker_port_get_strings,
3995 .get_ethtool_stats = rocker_port_get_stats,
3996 .get_sset_count = rocker_port_get_sset_count,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003997};
3998
3999/*****************
4000 * NAPI interface
4001 *****************/
4002
4003static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4004{
4005 return container_of(napi, struct rocker_port, napi_tx);
4006}
4007
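/* Tx completion NAPI poll: reclaim finished tx descriptors, unmap their
 * fragments, account tx_packets/tx_bytes (or tx_errors), free the skbs,
 * wake a stopped queue and return the consumed credits to the ring.
 */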
4008static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4009{
4010 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
4011 struct rocker *rocker = rocker_port->rocker;
4012 struct rocker_desc_info *desc_info;
4013 u32 credits = 0;
4014 int err;
4015
4016 /* Cleanup tx descriptors */
4017 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
David Ahernf2bbca52015-01-16 14:22:29 -07004018 struct sk_buff *skb;
4019
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004020 err = rocker_desc_err(desc_info);
4021 if (err && net_ratelimit())
4022 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4023 err);
4024 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
David Ahernf2bbca52015-01-16 14:22:29 -07004025
4026 skb = rocker_desc_cookie_ptr_get(desc_info);
4027 if (err == 0) {
4028 rocker_port->dev->stats.tx_packets++;
4029 rocker_port->dev->stats.tx_bytes += skb->len;
4030 } else
4031 rocker_port->dev->stats.tx_errors++;
4032
4033 dev_kfree_skb_any(skb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004034 credits++;
4035 }
4036
4037 if (credits && netif_queue_stopped(rocker_port->dev))
4038 netif_wake_queue(rocker_port->dev);
4039
4040 napi_complete(napi);
4041 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4042
4043 return 0;
4044}
4045
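/* Hand one received descriptor to the stack: unmap the rx buffer, size the
 * skb from ROCKER_TLV_RX_FRAG_LEN, account rx stats, pass it up with
 * netif_receive_skb() and refill the descriptor with a fresh skb.
 */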
4046static int rocker_port_rx_proc(struct rocker *rocker,
4047 struct rocker_port *rocker_port,
4048 struct rocker_desc_info *desc_info)
4049{
4050 struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
4051 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4052 size_t rx_len;
4053
4054 if (!skb)
4055 return -ENOENT;
4056
4057 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4058 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4059 return -EINVAL;
4060
4061 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4062
4063 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4064 skb_put(skb, rx_len);
4065 skb->protocol = eth_type_trans(skb, rocker_port->dev);
David Ahernf2bbca52015-01-16 14:22:29 -07004066
4067 rocker_port->dev->stats.rx_packets++;
4068 rocker_port->dev->stats.rx_bytes += skb->len;
4069
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004070 netif_receive_skb(skb);
4071
4072 return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
4073}
4074
4075static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4076{
4077 return container_of(napi, struct rocker_port, napi_rx);
4078}
4079
4080static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4081{
4082 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
4083 struct rocker *rocker = rocker_port->rocker;
4084 struct rocker_desc_info *desc_info;
4085 u32 credits = 0;
4086 int err;
4087
4088 /* Process rx descriptors */
4089 while (credits < budget &&
4090 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4091 err = rocker_desc_err(desc_info);
4092 if (err) {
4093 if (net_ratelimit())
4094 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4095 err);
4096 } else {
4097 err = rocker_port_rx_proc(rocker, rocker_port,
4098 desc_info);
4099 if (err && net_ratelimit())
4100 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4101 err);
4102 }
David Ahernf2bbca52015-01-16 14:22:29 -07004103 if (err)
4104 rocker_port->dev->stats.rx_errors++;
4105
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004106 rocker_desc_gen_clear(desc_info);
4107 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4108 credits++;
4109 }
4110
4111 if (credits < budget)
4112 napi_complete(napi);
4113
4114 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
4115
4116 return credits;
4117}
4118
4119/*****************
4120 * PCI driver ops
4121 *****************/
4122
4123static void rocker_carrier_init(struct rocker_port *rocker_port)
4124{
4125 struct rocker *rocker = rocker_port->rocker;
4126 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4127 bool link_up;
4128
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004129 link_up = link_status & (1 << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004130 if (link_up)
4131 netif_carrier_on(rocker_port->dev);
4132 else
4133 netif_carrier_off(rocker_port->dev);
4134}
4135
4136static void rocker_remove_ports(struct rocker *rocker)
4137{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004138 struct rocker_port *rocker_port;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004139 int i;
4140
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004141 for (i = 0; i < rocker->port_count; i++) {
4142 rocker_port = rocker->ports[i];
4143 rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
4144 unregister_netdev(rocker_port->dev);
4145 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004146 kfree(rocker->ports);
4147}
4148
4149static void rocker_port_dev_addr_init(struct rocker *rocker,
4150 struct rocker_port *rocker_port)
4151{
4152 struct pci_dev *pdev = rocker->pdev;
4153 int err;
4154
4155 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4156 rocker_port->dev->dev_addr);
4157 if (err) {
4158 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4159 eth_hw_addr_random(rocker_port->dev);
4160 }
4161}
4162
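/* Create one netdev per physical port.  port_number is the 0-based index
 * into rocker->ports[], pport the 1-based port id used on the device side.
 * Each port also claims an internal VLAN ID for untagged traffic and
 * installs its ingress port table entry.
 */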
4163static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4164{
4165 struct pci_dev *pdev = rocker->pdev;
4166 struct rocker_port *rocker_port;
4167 struct net_device *dev;
4168 int err;
4169
4170 dev = alloc_etherdev(sizeof(struct rocker_port));
4171 if (!dev)
4172 return -ENOMEM;
4173 rocker_port = netdev_priv(dev);
4174 rocker_port->dev = dev;
4175 rocker_port->rocker = rocker;
4176 rocker_port->port_number = port_number;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004177 rocker_port->pport = port_number + 1;
Scott Feldman5111f802014-11-28 14:34:30 +01004178 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004179
4180 rocker_port_dev_addr_init(rocker, rocker_port);
4181 dev->netdev_ops = &rocker_port_netdev_ops;
4182 dev->ethtool_ops = &rocker_port_ethtool_ops;
4183 netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
4184 NAPI_POLL_WEIGHT);
4185 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
4186 NAPI_POLL_WEIGHT);
4187 rocker_carrier_init(rocker_port);
4188
Roopa Prabhueb0ac422015-01-29 22:40:15 -08004189 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4190 NETIF_F_HW_SWITCH_OFFLOAD;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004191
4192 err = register_netdev(dev);
4193 if (err) {
4194 dev_err(&pdev->dev, "register_netdev failed\n");
4195 goto err_register_netdev;
4196 }
4197 rocker->ports[port_number] = rocker_port;
4198
Scott Feldman5111f802014-11-28 14:34:30 +01004199 rocker_port_set_learning(rocker_port);
4200
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004201 rocker_port->internal_vlan_id =
4202 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
4203 err = rocker_port_ig_tbl(rocker_port, 0);
4204 if (err) {
4205 dev_err(&pdev->dev, "install ig port table failed\n");
4206 goto err_port_ig_tbl;
4207 }
4208
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004209 return 0;
4210
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004211err_port_ig_tbl:
4212 unregister_netdev(dev);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004213err_register_netdev:
4214 free_netdev(dev);
4215 return err;
4216}
4217
4218static int rocker_probe_ports(struct rocker *rocker)
4219{
4220 int i;
4221 size_t alloc_size;
4222 int err;
4223
4224 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
 4225	rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
4226 for (i = 0; i < rocker->port_count; i++) {
4227 err = rocker_probe_port(rocker, i);
4228 if (err)
4229 goto remove_ports;
4230 }
4231 return 0;
4232
4233remove_ports:
4234 rocker_remove_ports(rocker);
4235 return err;
4236}
4237
4238static int rocker_msix_init(struct rocker *rocker)
4239{
4240 struct pci_dev *pdev = rocker->pdev;
4241 int msix_entries;
4242 int i;
4243 int err;
4244
4245 msix_entries = pci_msix_vec_count(pdev);
4246 if (msix_entries < 0)
4247 return msix_entries;
4248
4249 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
4250 return -EINVAL;
4251
4252 rocker->msix_entries = kmalloc_array(msix_entries,
4253 sizeof(struct msix_entry),
4254 GFP_KERNEL);
4255 if (!rocker->msix_entries)
4256 return -ENOMEM;
4257
4258 for (i = 0; i < msix_entries; i++)
4259 rocker->msix_entries[i].entry = i;
4260
4261 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
4262 if (err < 0)
4263 goto err_enable_msix;
4264
4265 return 0;
4266
4267err_enable_msix:
4268 kfree(rocker->msix_entries);
4269 return err;
4270}
4271
4272static void rocker_msix_fini(struct rocker *rocker)
4273{
4274 pci_disable_msix(rocker->pdev);
4275 kfree(rocker->msix_entries);
4276}
4277
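/* PCI probe: map BAR0, set up MSI-X, run the basic hardware test, reset the
 * device, bring up the cmd/event DMA rings and IRQs, initialize the OF-DPA
 * software tables and finally create the port netdevs.  The error labels
 * unwind in strict reverse order.
 */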
4278static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4279{
4280 struct rocker *rocker;
4281 int err;
4282
4283 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
4284 if (!rocker)
4285 return -ENOMEM;
4286
4287 err = pci_enable_device(pdev);
4288 if (err) {
4289 dev_err(&pdev->dev, "pci_enable_device failed\n");
4290 goto err_pci_enable_device;
4291 }
4292
4293 err = pci_request_regions(pdev, rocker_driver_name);
4294 if (err) {
4295 dev_err(&pdev->dev, "pci_request_regions failed\n");
4296 goto err_pci_request_regions;
4297 }
4298
4299 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4300 if (!err) {
4301 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4302 if (err) {
4303 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
4304 goto err_pci_set_dma_mask;
4305 }
4306 } else {
4307 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4308 if (err) {
4309 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
4310 goto err_pci_set_dma_mask;
4311 }
4312 }
4313
4314 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
4315 dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
 4316		goto err_pci_resource_len_check;
4317 }
4318
4319 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
4320 pci_resource_len(pdev, 0));
4321 if (!rocker->hw_addr) {
4322 dev_err(&pdev->dev, "ioremap failed\n");
4323 err = -EIO;
4324 goto err_ioremap;
4325 }
4326 pci_set_master(pdev);
4327
4328 rocker->pdev = pdev;
4329 pci_set_drvdata(pdev, rocker);
4330
4331 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
4332
4333 err = rocker_msix_init(rocker);
4334 if (err) {
4335 dev_err(&pdev->dev, "MSI-X init failed\n");
4336 goto err_msix_init;
4337 }
4338
4339 err = rocker_basic_hw_test(rocker);
4340 if (err) {
4341 dev_err(&pdev->dev, "basic hw test failed\n");
4342 goto err_basic_hw_test;
4343 }
4344
4345 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
4346
4347 err = rocker_dma_rings_init(rocker);
4348 if (err)
4349 goto err_dma_rings_init;
4350
4351 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
4352 rocker_cmd_irq_handler, 0,
4353 rocker_driver_name, rocker);
4354 if (err) {
4355 dev_err(&pdev->dev, "cannot assign cmd irq\n");
4356 goto err_request_cmd_irq;
4357 }
4358
4359 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
4360 rocker_event_irq_handler, 0,
4361 rocker_driver_name, rocker);
4362 if (err) {
4363 dev_err(&pdev->dev, "cannot assign event irq\n");
4364 goto err_request_event_irq;
4365 }
4366
4367 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
4368
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004369 err = rocker_init_tbls(rocker);
4370 if (err) {
4371 dev_err(&pdev->dev, "cannot init rocker tables\n");
4372 goto err_init_tbls;
4373 }
4374
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004375 err = rocker_probe_ports(rocker);
4376 if (err) {
4377 dev_err(&pdev->dev, "failed to probe ports\n");
4378 goto err_probe_ports;
4379 }
4380
4381 dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
4382
4383 return 0;
4384
4385err_probe_ports:
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004386 rocker_free_tbls(rocker);
4387err_init_tbls:
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004388 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
4389err_request_event_irq:
4390 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
4391err_request_cmd_irq:
4392 rocker_dma_rings_fini(rocker);
4393err_dma_rings_init:
4394err_basic_hw_test:
4395 rocker_msix_fini(rocker);
4396err_msix_init:
4397 iounmap(rocker->hw_addr);
4398err_ioremap:
4399err_pci_resource_len_check:
4400err_pci_set_dma_mask:
4401 pci_release_regions(pdev);
4402err_pci_request_regions:
4403 pci_disable_device(pdev);
4404err_pci_enable_device:
4405 kfree(rocker);
4406 return err;
4407}
4408
4409static void rocker_remove(struct pci_dev *pdev)
4410{
4411 struct rocker *rocker = pci_get_drvdata(pdev);
4412
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004413 rocker_free_tbls(rocker);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004414 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
4415 rocker_remove_ports(rocker);
4416 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
4417 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
4418 rocker_dma_rings_fini(rocker);
4419 rocker_msix_fini(rocker);
4420 iounmap(rocker->hw_addr);
4421 pci_release_regions(rocker->pdev);
4422 pci_disable_device(rocker->pdev);
4423 kfree(rocker);
4424}
4425
4426static struct pci_driver rocker_pci_driver = {
4427 .name = rocker_driver_name,
4428 .id_table = rocker_pci_id_table,
4429 .probe = rocker_probe,
4430 .remove = rocker_remove,
4431};
4432
Scott Feldman6c707942014-11-28 14:34:28 +01004433/************************************
4434 * Net device notifier event handler
4435 ************************************/
4436
4437static bool rocker_port_dev_check(struct net_device *dev)
4438{
4439 return dev->netdev_ops == &rocker_port_netdev_ops;
4440}
4441
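/* Bridge enslave/release (seen via NETDEV_CHANGEUPPER): untagged traffic is
 * moved between the port's own internal VLAN and the bridge's internal
 * VLAN - drop the old untagged VLAN mapping, switch internal_vlan_id, then
 * re-add the untagged VLAN entry.
 */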
4442static int rocker_port_bridge_join(struct rocker_port *rocker_port,
4443 struct net_device *bridge)
4444{
4445 int err;
4446
4447 rocker_port_internal_vlan_id_put(rocker_port,
4448 rocker_port->dev->ifindex);
4449
4450 rocker_port->bridge_dev = bridge;
4451
4452 /* Use bridge internal VLAN ID for untagged pkts */
4453 err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
4454 if (err)
4455 return err;
4456 rocker_port->internal_vlan_id =
4457 rocker_port_internal_vlan_id_get(rocker_port,
4458 bridge->ifindex);
4459 err = rocker_port_vlan(rocker_port, 0, 0);
4460
4461 return err;
4462}
4463
4464static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
4465{
4466 int err;
4467
4468 rocker_port_internal_vlan_id_put(rocker_port,
4469 rocker_port->bridge_dev->ifindex);
4470
4471 rocker_port->bridge_dev = NULL;
4472
4473 /* Use port internal VLAN ID for untagged pkts */
4474 err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
4475 if (err)
4476 return err;
4477 rocker_port->internal_vlan_id =
4478 rocker_port_internal_vlan_id_get(rocker_port,
4479 rocker_port->dev->ifindex);
4480 err = rocker_port_vlan(rocker_port, 0, 0);
4481
4482 return err;
4483}
4484
4485static int rocker_port_master_changed(struct net_device *dev)
4486{
4487 struct rocker_port *rocker_port = netdev_priv(dev);
4488 struct net_device *master = netdev_master_upper_dev_get(dev);
4489 int err = 0;
4490
4491 if (master && master->rtnl_link_ops &&
4492 !strcmp(master->rtnl_link_ops->kind, "bridge"))
4493 err = rocker_port_bridge_join(rocker_port, master);
4494 else
4495 err = rocker_port_bridge_leave(rocker_port);
4496
4497 return err;
4498}
4499
4500static int rocker_netdevice_event(struct notifier_block *unused,
4501 unsigned long event, void *ptr)
4502{
4503 struct net_device *dev;
4504 int err;
4505
4506 switch (event) {
4507 case NETDEV_CHANGEUPPER:
4508 dev = netdev_notifier_info_to_dev(ptr);
4509 if (!rocker_port_dev_check(dev))
4510 return NOTIFY_DONE;
4511 err = rocker_port_master_changed(dev);
4512 if (err)
4513 netdev_warn(dev,
4514 "failed to reflect master change (err %d)\n",
4515 err);
4516 break;
4517 }
4518
4519 return NOTIFY_DONE;
4520}
4521
4522static struct notifier_block rocker_netdevice_nb __read_mostly = {
4523 .notifier_call = rocker_netdevice_event,
4524};
4525
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004526/***********************
4527 * Module init and exit
4528 ***********************/
4529
4530static int __init rocker_module_init(void)
4531{
Scott Feldman6c707942014-11-28 14:34:28 +01004532 int err;
4533
4534 register_netdevice_notifier(&rocker_netdevice_nb);
4535 err = pci_register_driver(&rocker_pci_driver);
4536 if (err)
4537 goto err_pci_register_driver;
4538 return 0;
4539
4540err_pci_register_driver:
4541 unregister_netdevice_notifier(&rocker_netdevice_nb);
4542 return err;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004543}
4544
4545static void __exit rocker_module_exit(void)
4546{
Scott Feldman6c707942014-11-28 14:34:28 +01004547 unregister_netdevice_notifier(&rocker_netdevice_nb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004548 pci_unregister_driver(&rocker_pci_driver);
4549}
4550
4551module_init(rocker_module_init);
4552module_exit(rocker_module_exit);
4553
4554MODULE_LICENSE("GPL v2");
4555MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
4556MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
4557MODULE_DESCRIPTION("Rocker switch device driver");
4558MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);