Jiri Pirko4b8ac962014-11-28 14:34:26 +01001/*
2 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
3 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/interrupt.h>
16#include <linux/sched.h>
17#include <linux/wait.h>
18#include <linux/spinlock.h>
Scott Feldman9f6bbf72014-11-28 14:34:27 +010019#include <linux/hashtable.h>
Jiri Pirko4b8ac962014-11-28 14:34:26 +010020#include <linux/crc32.h>
21#include <linux/sort.h>
22#include <linux/random.h>
23#include <linux/netdevice.h>
24#include <linux/inetdevice.h>
25#include <linux/skbuff.h>
26#include <linux/socket.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/if_ether.h>
30#include <linux/if_vlan.h>
Scott Feldman6c707942014-11-28 14:34:28 +010031#include <linux/if_bridge.h>
Scott Feldman9f6bbf72014-11-28 14:34:27 +010032#include <linux/bitops.h>
Jiri Pirko4b8ac962014-11-28 14:34:26 +010033#include <net/switchdev.h>
34#include <net/rtnetlink.h>
35#include <asm-generic/io-64-nonatomic-lo-hi.h>
36#include <generated/utsrelease.h>
37
38#include "rocker.h"
39
40static const char rocker_driver_name[] = "rocker";
41
42static const struct pci_device_id rocker_pci_id_table[] = {
43 {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
44 {0, }
45};
46
Scott Feldman9f6bbf72014-11-28 14:34:27 +010047struct rocker_flow_tbl_key {
48 u32 priority;
49 enum rocker_of_dpa_table_id tbl_id;
50 union {
51 struct {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -080052 u32 in_pport;
53 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +010054 enum rocker_of_dpa_table_id goto_tbl;
55 } ig_port;
56 struct {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -080057 u32 in_pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +010058 __be16 vlan_id;
59 __be16 vlan_id_mask;
60 enum rocker_of_dpa_table_id goto_tbl;
61 bool untagged;
62 __be16 new_vlan_id;
63 } vlan;
64 struct {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -080065 u32 in_pport;
66 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +010067 __be16 eth_type;
68 u8 eth_dst[ETH_ALEN];
69 u8 eth_dst_mask[ETH_ALEN];
70 __be16 vlan_id;
71 __be16 vlan_id_mask;
72 enum rocker_of_dpa_table_id goto_tbl;
73 bool copy_to_cpu;
74 } term_mac;
75 struct {
76 __be16 eth_type;
77 __be32 dst4;
78 __be32 dst4_mask;
79 enum rocker_of_dpa_table_id goto_tbl;
80 u32 group_id;
81 } ucast_routing;
82 struct {
83 u8 eth_dst[ETH_ALEN];
84 u8 eth_dst_mask[ETH_ALEN];
85 int has_eth_dst;
86 int has_eth_dst_mask;
87 __be16 vlan_id;
88 u32 tunnel_id;
89 enum rocker_of_dpa_table_id goto_tbl;
90 u32 group_id;
91 bool copy_to_cpu;
92 } bridge;
93 struct {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -080094 u32 in_pport;
95 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +010096 u8 eth_src[ETH_ALEN];
97 u8 eth_src_mask[ETH_ALEN];
98 u8 eth_dst[ETH_ALEN];
99 u8 eth_dst_mask[ETH_ALEN];
100 __be16 eth_type;
101 __be16 vlan_id;
102 __be16 vlan_id_mask;
103 u8 ip_proto;
104 u8 ip_proto_mask;
105 u8 ip_tos;
106 u8 ip_tos_mask;
107 u32 group_id;
108 } acl;
109 };
110};
111
112struct rocker_flow_tbl_entry {
113 struct hlist_node entry;
114 u32 ref_count;
115 u64 cookie;
116 struct rocker_flow_tbl_key key;
117 u32 key_crc32; /* key */
118};
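/* Flow table entries live in the rocker->flow_tbl hashtable, hashed on
 * key_crc32 (a CRC32 computed over the key above); ref_count tracks how many
 * users share the entry, and cookie is the handle handed to the device,
 * presumably drawn from rocker->flow_tbl_next_cookie when the entry is first
 * installed.
 */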
119
120struct rocker_group_tbl_entry {
121 struct hlist_node entry;
122 u32 cmd;
123 u32 group_id; /* key */
124 u16 group_count;
125 u32 *group_ids;
126 union {
127 struct {
128 u8 pop_vlan;
129 } l2_interface;
130 struct {
131 u8 eth_src[ETH_ALEN];
132 u8 eth_dst[ETH_ALEN];
133 __be16 vlan_id;
134 u32 group_id;
135 } l2_rewrite;
136 struct {
137 u8 eth_src[ETH_ALEN];
138 u8 eth_dst[ETH_ALEN];
139 __be16 vlan_id;
140 bool ttl_check;
141 u32 group_id;
142 } l3_unicast;
143 };
144};
145
146struct rocker_fdb_tbl_entry {
147 struct hlist_node entry;
148 u32 key_crc32; /* key */
149 bool learned;
150 struct rocker_fdb_tbl_key {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -0800151 u32 pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100152 u8 addr[ETH_ALEN];
153 __be16 vlan_id;
154 } key;
155};
156
157struct rocker_internal_vlan_tbl_entry {
158 struct hlist_node entry;
159 int ifindex; /* key */
160 u32 ref_count;
161 __be16 vlan_id;
162};
163
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100164struct rocker_desc_info {
165 char *data; /* mapped */
166 size_t data_size;
167 size_t tlv_size;
168 struct rocker_desc *desc;
169 DEFINE_DMA_UNMAP_ADDR(mapaddr);
170};
171
172struct rocker_dma_ring_info {
173 size_t size;
174 u32 head;
175 u32 tail;
176 struct rocker_desc *desc; /* mapped */
177 dma_addr_t mapaddr;
178 struct rocker_desc_info *desc_info;
179 unsigned int type;
180};
181
182struct rocker;
183
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100184enum {
185 ROCKER_CTRL_LINK_LOCAL_MCAST,
186 ROCKER_CTRL_LOCAL_ARP,
187 ROCKER_CTRL_IPV4_MCAST,
188 ROCKER_CTRL_IPV6_MCAST,
189 ROCKER_CTRL_DFLT_BRIDGING,
190 ROCKER_CTRL_MAX,
191};
192
193#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00
194#define ROCKER_N_INTERNAL_VLANS 255
195#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
196#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
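/* Internal VLAN IDs occupy 0x0f00..0x0ffe (255 values starting at
 * ROCKER_INTERNAL_VLAN_ID_BASE, see rocker_vlan_id_is_internal() below).
 * They never appear on the wire: rocker_port_vid_to_vlan() substitutes the
 * port's internal_vlan_id for untagged traffic (vid 0) and reports via
 * *pop_vlan that the tag must be popped again.
 */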
197
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100198struct rocker_port {
199 struct net_device *dev;
Scott Feldman6c707942014-11-28 14:34:28 +0100200 struct net_device *bridge_dev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100201 struct rocker *rocker;
202 unsigned int port_number;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -0800203 u32 pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100204 __be16 internal_vlan_id;
Scott Feldman6c707942014-11-28 14:34:28 +0100205 int stp_state;
Scott Feldman5111f802014-11-28 14:34:30 +0100206 u32 brport_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100207 bool ctrls[ROCKER_CTRL_MAX];
208 unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100209 struct napi_struct napi_tx;
210 struct napi_struct napi_rx;
211 struct rocker_dma_ring_info tx_ring;
212 struct rocker_dma_ring_info rx_ring;
213};
214
215struct rocker {
216 struct pci_dev *pdev;
217 u8 __iomem *hw_addr;
218 struct msix_entry *msix_entries;
219 unsigned int port_count;
220 struct rocker_port **ports;
221 struct {
222 u64 id;
223 } hw;
224 spinlock_t cmd_ring_lock;
225 struct rocker_dma_ring_info cmd_ring;
226 struct rocker_dma_ring_info event_ring;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100227 DECLARE_HASHTABLE(flow_tbl, 16);
228 spinlock_t flow_tbl_lock;
229 u64 flow_tbl_next_cookie;
230 DECLARE_HASHTABLE(group_tbl, 16);
231 spinlock_t group_tbl_lock;
232 DECLARE_HASHTABLE(fdb_tbl, 16);
233 spinlock_t fdb_tbl_lock;
234 unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
235 DECLARE_HASHTABLE(internal_vlan_tbl, 8);
236 spinlock_t internal_vlan_tbl_lock;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100237};
238
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100239static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
240static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
241static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
242static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
243static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
244static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
245static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
246static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
247static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
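/* MAC address/mask constants used when matching control traffic classes from
 * the ROCKER_CTRL_* enum above: ll_mac/ll_mask cover the 01:80:c2 bridge
 * link-local block, ipv4_mcast/ipv4_mask the 01:00:5e IPv4 multicast range,
 * ipv6_mcast/ipv6_mask the 33:33 IPv6 multicast range, while zero_mac,
 * ff_mac and mcast_mac act as all-zeros, all-ones and multicast-bit patterns.
 */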
248
249/* Rocker priority levels for flow table entries. Higher
250 * priority match takes precedence over lower priority match.
251 */
252
253enum {
254 ROCKER_PRIORITY_UNKNOWN = 0,
255 ROCKER_PRIORITY_IG_PORT = 1,
256 ROCKER_PRIORITY_VLAN = 1,
257 ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
258 ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
259 ROCKER_PRIORITY_UNICAST_ROUTING = 1,
260 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
261 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
262 ROCKER_PRIORITY_BRIDGING_VLAN = 3,
263 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
264 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
265 ROCKER_PRIORITY_BRIDGING_TENANT = 3,
266 ROCKER_PRIORITY_ACL_CTRL = 3,
267 ROCKER_PRIORITY_ACL_NORMAL = 2,
268 ROCKER_PRIORITY_ACL_DFLT = 1,
269};
270
271static bool rocker_vlan_id_is_internal(__be16 vlan_id)
272{
273 u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
274 u16 end = 0xffe;
275 u16 _vlan_id = ntohs(vlan_id);
276
277 return (_vlan_id >= start && _vlan_id <= end);
278}
279
280static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
281 u16 vid, bool *pop_vlan)
282{
283 __be16 vlan_id;
284
285 if (pop_vlan)
286 *pop_vlan = false;
287 vlan_id = htons(vid);
288 if (!vlan_id) {
289 vlan_id = rocker_port->internal_vlan_id;
290 if (pop_vlan)
291 *pop_vlan = true;
292 }
293
294 return vlan_id;
295}
296
Scott Feldman6c707942014-11-28 14:34:28 +0100297static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
298 __be16 vlan_id)
299{
300 if (rocker_vlan_id_is_internal(vlan_id))
301 return 0;
302
303 return ntohs(vlan_id);
304}
305
306static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
307{
308 return !!rocker_port->bridge_dev;
309}
310
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100311struct rocker_wait {
312 wait_queue_head_t wait;
313 bool done;
314 bool nowait;
315};
316
317static void rocker_wait_reset(struct rocker_wait *wait)
318{
319 wait->done = false;
320 wait->nowait = false;
321}
322
323static void rocker_wait_init(struct rocker_wait *wait)
324{
325 init_waitqueue_head(&wait->wait);
326 rocker_wait_reset(wait);
327}
328
329static struct rocker_wait *rocker_wait_create(gfp_t gfp)
330{
331 struct rocker_wait *wait;
332
333 wait = kmalloc(sizeof(*wait), gfp);
334 if (!wait)
335 return NULL;
336 rocker_wait_init(wait);
337 return wait;
338}
339
340static void rocker_wait_destroy(struct rocker_wait *wait)
341{
342	kfree(wait);
343}
344
345static bool rocker_wait_event_timeout(struct rocker_wait *wait,
346 unsigned long timeout)
347{
348	wait_event_timeout(wait->wait, wait->done, timeout);
349 if (!wait->done)
350 return false;
351 return true;
352}
353
354static void rocker_wait_wake_up(struct rocker_wait *wait)
355{
356 wait->done = true;
357 wake_up(&wait->wait);
358}
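/* A rocker_wait ties a command descriptor to its completion:
 * rocker_cmd_exec() below sleeps in rocker_wait_event_timeout() until
 * rocker_cmd_irq_handler() calls rocker_wait_wake_up() for the finished
 * descriptor (or, for nowait commands, destroys the wait object directly
 * from the interrupt handler).
 */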
359
360static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
361{
362 return rocker->msix_entries[vector].vector;
363}
364
365static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
366{
367 return rocker_msix_vector(rocker_port->rocker,
368 ROCKER_MSIX_VEC_TX(rocker_port->port_number));
369}
370
371static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
372{
373 return rocker_msix_vector(rocker_port->rocker,
374 ROCKER_MSIX_VEC_RX(rocker_port->port_number));
375}
376
377#define rocker_write32(rocker, reg, val) \
378 writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
379#define rocker_read32(rocker, reg) \
380 readl((rocker)->hw_addr + (ROCKER_ ## reg))
381#define rocker_write64(rocker, reg, val) \
382 writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
383#define rocker_read64(rocker, reg) \
384 readq((rocker)->hw_addr + (ROCKER_ ## reg))
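/* The register name is token-pasted onto the ROCKER_ prefix, so e.g.
 * rocker_write32(rocker, TEST_REG, rnd) below expands to
 * writel(rnd, rocker->hw_addr + ROCKER_TEST_REG).
 */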
385
386/*****************************
387 * HW basic testing functions
388 *****************************/
389
390static int rocker_reg_test(struct rocker *rocker)
391{
392 struct pci_dev *pdev = rocker->pdev;
393 u64 test_reg;
394 u64 rnd;
395
396 rnd = prandom_u32();
397 rnd >>= 1;
398 rocker_write32(rocker, TEST_REG, rnd);
399 test_reg = rocker_read32(rocker, TEST_REG);
400 if (test_reg != rnd * 2) {
401 dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
402 test_reg, rnd * 2);
403 return -EIO;
404 }
405
406 rnd = prandom_u32();
407 rnd <<= 31;
408 rnd |= prandom_u32();
409 rocker_write64(rocker, TEST_REG64, rnd);
410 test_reg = rocker_read64(rocker, TEST_REG64);
411 if (test_reg != rnd * 2) {
412 dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
413 test_reg, rnd * 2);
414 return -EIO;
415 }
416
417 return 0;
418}
419
420static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
421 u32 test_type, dma_addr_t dma_handle,
422 unsigned char *buf, unsigned char *expect,
423 size_t size)
424{
425 struct pci_dev *pdev = rocker->pdev;
426 int i;
427
428 rocker_wait_reset(wait);
429 rocker_write32(rocker, TEST_DMA_CTRL, test_type);
430
431 if (!rocker_wait_event_timeout(wait, HZ / 10)) {
432		dev_err(&pdev->dev, "no interrupt received within the timeout\n");
433 return -EIO;
434 }
435
436 for (i = 0; i < size; i++) {
437 if (buf[i] != expect[i]) {
438			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
439 buf[i], i, expect[i]);
440 return -EIO;
441 }
442 }
443 return 0;
444}
445
446#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
447#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
448
449static int rocker_dma_test_offset(struct rocker *rocker,
450 struct rocker_wait *wait, int offset)
451{
452 struct pci_dev *pdev = rocker->pdev;
453 unsigned char *alloc;
454 unsigned char *buf;
455 unsigned char *expect;
456 dma_addr_t dma_handle;
457 int i;
458 int err;
459
460 alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
461 GFP_KERNEL | GFP_DMA);
462 if (!alloc)
463 return -ENOMEM;
464 buf = alloc + offset;
465 expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
466
467 dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
468 PCI_DMA_BIDIRECTIONAL);
469 if (pci_dma_mapping_error(pdev, dma_handle)) {
470 err = -EIO;
471 goto free_alloc;
472 }
473
474 rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
475 rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
476
477 memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
478 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
479 dma_handle, buf, expect,
480 ROCKER_TEST_DMA_BUF_SIZE);
481 if (err)
482 goto unmap;
483
484 memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
485 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
486 dma_handle, buf, expect,
487 ROCKER_TEST_DMA_BUF_SIZE);
488 if (err)
489 goto unmap;
490
491 prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
492 for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
493 expect[i] = ~buf[i];
494 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
495 dma_handle, buf, expect,
496 ROCKER_TEST_DMA_BUF_SIZE);
497 if (err)
498 goto unmap;
499
500unmap:
501 pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
502 PCI_DMA_BIDIRECTIONAL);
503free_alloc:
504 kfree(alloc);
505
506 return err;
507}
508
509static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
510{
511 int i;
512 int err;
513
514 for (i = 0; i < 8; i++) {
515 err = rocker_dma_test_offset(rocker, wait, i);
516 if (err)
517 return err;
518 }
519 return 0;
520}
521
522static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
523{
524 struct rocker_wait *wait = dev_id;
525
526 rocker_wait_wake_up(wait);
527
528 return IRQ_HANDLED;
529}
530
531static int rocker_basic_hw_test(struct rocker *rocker)
532{
533 struct pci_dev *pdev = rocker->pdev;
534 struct rocker_wait wait;
535 int err;
536
537 err = rocker_reg_test(rocker);
538 if (err) {
539 dev_err(&pdev->dev, "reg test failed\n");
540 return err;
541 }
542
543 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
544 rocker_test_irq_handler, 0,
545 rocker_driver_name, &wait);
546 if (err) {
547 dev_err(&pdev->dev, "cannot assign test irq\n");
548 return err;
549 }
550
551 rocker_wait_init(&wait);
552 rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
553
554 if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
555		dev_err(&pdev->dev, "no interrupt received within the timeout\n");
556 err = -EIO;
557 goto free_irq;
558 }
559
560 err = rocker_dma_test(rocker, &wait);
561 if (err)
562 dev_err(&pdev->dev, "dma test failed\n");
563
564free_irq:
565 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
566 return err;
567}
568
569/******
570 * TLV
571 ******/
572
573#define ROCKER_TLV_ALIGNTO 8U
574#define ROCKER_TLV_ALIGN(len) \
575 (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
576#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
577
578/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
579 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
580 * | Header | Pad | Payload | Pad |
581 * | (struct rocker_tlv) | ing | | ing |
582 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
583 * <--------------------------- tlv->len -------------------------->
584 */
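/* For example, rocker_tlv_put() with a 6 byte (ETH_ALEN) payload records
 * tlv->len = ROCKER_TLV_HDRLEN + 6 but advances desc_info->tlv_size by the
 * 8-byte-aligned total size, zero-filling the rocker_tlv_padlen() pad bytes.
 */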
585
586static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
587 int *remaining)
588{
589 int totlen = ROCKER_TLV_ALIGN(tlv->len);
590
591 *remaining -= totlen;
592 return (struct rocker_tlv *) ((char *) tlv + totlen);
593}
594
595static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
596{
597 return remaining >= (int) ROCKER_TLV_HDRLEN &&
598 tlv->len >= ROCKER_TLV_HDRLEN &&
599 tlv->len <= remaining;
600}
601
602#define rocker_tlv_for_each(pos, head, len, rem) \
603 for (pos = head, rem = len; \
604 rocker_tlv_ok(pos, rem); \
605 pos = rocker_tlv_next(pos, &(rem)))
606
607#define rocker_tlv_for_each_nested(pos, tlv, rem) \
608 rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \
609 rocker_tlv_len(tlv), rem)
610
611static int rocker_tlv_attr_size(int payload)
612{
613 return ROCKER_TLV_HDRLEN + payload;
614}
615
616static int rocker_tlv_total_size(int payload)
617{
618 return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
619}
620
621static int rocker_tlv_padlen(int payload)
622{
623 return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
624}
625
626static int rocker_tlv_type(const struct rocker_tlv *tlv)
627{
628 return tlv->type;
629}
630
631static void *rocker_tlv_data(const struct rocker_tlv *tlv)
632{
633 return (char *) tlv + ROCKER_TLV_HDRLEN;
634}
635
636static int rocker_tlv_len(const struct rocker_tlv *tlv)
637{
638 return tlv->len - ROCKER_TLV_HDRLEN;
639}
640
641static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
642{
643 return *(u8 *) rocker_tlv_data(tlv);
644}
645
646static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
647{
648 return *(u16 *) rocker_tlv_data(tlv);
649}
650
Jiri Pirko9b03c712014-12-03 14:14:53 +0100651static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
652{
653 return *(__be16 *) rocker_tlv_data(tlv);
654}
655
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100656static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
657{
658 return *(u32 *) rocker_tlv_data(tlv);
659}
660
661static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
662{
663 return *(u64 *) rocker_tlv_data(tlv);
664}
665
666static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
667 const char *buf, int buf_len)
668{
669 const struct rocker_tlv *tlv;
670 const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
671 int rem;
672
673 memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
674
675 rocker_tlv_for_each(tlv, head, buf_len, rem) {
676 u32 type = rocker_tlv_type(tlv);
677
678 if (type > 0 && type <= maxtype)
679 tb[type] = (struct rocker_tlv *) tlv;
680 }
681}
682
683static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
684 const struct rocker_tlv *tlv)
685{
686 rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
687 rocker_tlv_len(tlv));
688}
689
690static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
691 struct rocker_desc_info *desc_info)
692{
693 rocker_tlv_parse(tb, maxtype, desc_info->data,
694 desc_info->desc->tlv_size);
695}
696
697static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
698{
699 return (struct rocker_tlv *) ((char *) desc_info->data +
700 desc_info->tlv_size);
701}
702
703static int rocker_tlv_put(struct rocker_desc_info *desc_info,
704 int attrtype, int attrlen, const void *data)
705{
706 int tail_room = desc_info->data_size - desc_info->tlv_size;
707 int total_size = rocker_tlv_total_size(attrlen);
708 struct rocker_tlv *tlv;
709
710 if (unlikely(tail_room < total_size))
711 return -EMSGSIZE;
712
713 tlv = rocker_tlv_start(desc_info);
714 desc_info->tlv_size += total_size;
715 tlv->type = attrtype;
716 tlv->len = rocker_tlv_attr_size(attrlen);
717 memcpy(rocker_tlv_data(tlv), data, attrlen);
718 memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
719 return 0;
720}
721
722static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
723 int attrtype, u8 value)
724{
725 return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
726}
727
728static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
729 int attrtype, u16 value)
730{
731 return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
732}
733
Jiri Pirko9b03c712014-12-03 14:14:53 +0100734static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
735 int attrtype, __be16 value)
736{
737 return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
738}
739
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100740static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
741 int attrtype, u32 value)
742{
743 return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
744}
745
Jiri Pirko9b03c712014-12-03 14:14:53 +0100746static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
747 int attrtype, __be32 value)
748{
749 return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
750}
751
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100752static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
753 int attrtype, u64 value)
754{
755 return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
756}
757
758static struct rocker_tlv *
759rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
760{
761 struct rocker_tlv *start = rocker_tlv_start(desc_info);
762
763 if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
764 return NULL;
765
766 return start;
767}
768
769static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
770 struct rocker_tlv *start)
771{
772 start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
773}
774
775static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
776 struct rocker_tlv *start)
777{
778 desc_info->tlv_size = (char *) start - desc_info->data;
779}
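/* Typical command construction (see rocker_cmd_get_port_settings_prep()
 * further down) nests attributes inside a ROCKER_TLV_CMD_INFO container:
 *
 *	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 *	rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
 *			   rocker_port->pport);
 *	rocker_tlv_nest_end(desc_info, cmd_info);
 *
 * rocker_tlv_nest_end() back-patches the nest header's len to cover every
 * attribute added since rocker_tlv_nest_start().
 */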
780
781/******************************************
782 * DMA rings and descriptors manipulations
783 ******************************************/
784
785static u32 __pos_inc(u32 pos, size_t limit)
786{
787 return ++pos == limit ? 0 : pos;
788}
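/* head and tail are the driver's ring positions: head == tail means empty,
 * and one slot is always left unused so rocker_desc_head_get() can report
 * "ring full" when advancing head would make it equal tail.
 */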
789
790static int rocker_desc_err(struct rocker_desc_info *desc_info)
791{
Scott Feldman7eb344f2015-02-25 20:15:36 -0800792 int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
793
794 switch (err) {
795 case ROCKER_OK:
796 return 0;
797 case -ROCKER_ENOENT:
798 return -ENOENT;
799 case -ROCKER_ENXIO:
800 return -ENXIO;
801 case -ROCKER_ENOMEM:
802 return -ENOMEM;
803 case -ROCKER_EEXIST:
804 return -EEXIST;
805 case -ROCKER_EINVAL:
806 return -EINVAL;
807 case -ROCKER_EMSGSIZE:
808 return -EMSGSIZE;
809 case -ROCKER_ENOTSUP:
810 return -EOPNOTSUPP;
811 case -ROCKER_ENOBUFS:
812 return -ENOBUFS;
813 }
814
815 return -EINVAL;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100816}
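/* comp_err carries the device's completion status together with the GEN
 * (generation) bit used for descriptor ownership; the bit is masked off
 * above and the remaining ROCKER_* code is mapped to a kernel errno.
 */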
817
818static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
819{
820 desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
821}
822
823static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
824{
825 u32 comp_err = desc_info->desc->comp_err;
826
827 return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
828}
829
830static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
831{
Arnd Bergmannadedf372015-01-13 15:23:52 +0100832 return (void *)(uintptr_t)desc_info->desc->cookie;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100833}
834
835static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
836 void *ptr)
837{
Arnd Bergmannadedf372015-01-13 15:23:52 +0100838 desc_info->desc->cookie = (uintptr_t) ptr;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100839}
840
841static struct rocker_desc_info *
842rocker_desc_head_get(struct rocker_dma_ring_info *info)
843{
844	struct rocker_desc_info *desc_info;
845 u32 head = __pos_inc(info->head, info->size);
846
847 desc_info = &info->desc_info[info->head];
848 if (head == info->tail)
849 return NULL; /* ring full */
850 desc_info->tlv_size = 0;
851 return desc_info;
852}
853
854static void rocker_desc_commit(struct rocker_desc_info *desc_info)
855{
856 desc_info->desc->buf_size = desc_info->data_size;
857 desc_info->desc->tlv_size = desc_info->tlv_size;
858}
859
860static void rocker_desc_head_set(struct rocker *rocker,
861 struct rocker_dma_ring_info *info,
862 struct rocker_desc_info *desc_info)
863{
864 u32 head = __pos_inc(info->head, info->size);
865
866 BUG_ON(head == info->tail);
867 rocker_desc_commit(desc_info);
868 info->head = head;
869 rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
870}
871
872static struct rocker_desc_info *
873rocker_desc_tail_get(struct rocker_dma_ring_info *info)
874{
875	struct rocker_desc_info *desc_info;
876
877 if (info->tail == info->head)
878 return NULL; /* nothing to be done between head and tail */
879 desc_info = &info->desc_info[info->tail];
880 if (!rocker_desc_gen(desc_info))
881 return NULL; /* gen bit not set, desc is not ready yet */
882 info->tail = __pos_inc(info->tail, info->size);
883 desc_info->tlv_size = desc_info->desc->tlv_size;
884 return desc_info;
885}
886
887static void rocker_dma_ring_credits_set(struct rocker *rocker,
888 struct rocker_dma_ring_info *info,
889 u32 credits)
890{
891 if (credits)
892 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
893}
894
895static unsigned long rocker_dma_ring_size_fix(size_t size)
896{
897 return max(ROCKER_DMA_SIZE_MIN,
898 min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
899}
900
901static int rocker_dma_ring_create(struct rocker *rocker,
902 unsigned int type,
903 size_t size,
904 struct rocker_dma_ring_info *info)
905{
906 int i;
907
908 BUG_ON(size != rocker_dma_ring_size_fix(size));
909 info->size = size;
910 info->type = type;
911 info->head = 0;
912 info->tail = 0;
913 info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
914 GFP_KERNEL);
915 if (!info->desc_info)
916 return -ENOMEM;
917
918 info->desc = pci_alloc_consistent(rocker->pdev,
919 info->size * sizeof(*info->desc),
920 &info->mapaddr);
921 if (!info->desc) {
922 kfree(info->desc_info);
923 return -ENOMEM;
924 }
925
926 for (i = 0; i < info->size; i++)
927 info->desc_info[i].desc = &info->desc[i];
928
929 rocker_write32(rocker, DMA_DESC_CTRL(info->type),
930 ROCKER_DMA_DESC_CTRL_RESET);
931 rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
932 rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
933
934 return 0;
935}
936
937static void rocker_dma_ring_destroy(struct rocker *rocker,
938 struct rocker_dma_ring_info *info)
939{
940 rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
941
942 pci_free_consistent(rocker->pdev,
943 info->size * sizeof(struct rocker_desc),
944 info->desc, info->mapaddr);
945 kfree(info->desc_info);
946}
947
948static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
949 struct rocker_dma_ring_info *info)
950{
951 int i;
952
953 BUG_ON(info->head || info->tail);
954
955	/* When the driver is the consumer on a ring, we need to advance head
956	 * for each desc. That tells the hw that the desc is ready to be used by it.
957 */
958 for (i = 0; i < info->size - 1; i++)
959 rocker_desc_head_set(rocker, info, &info->desc_info[i]);
960 rocker_desc_commit(&info->desc_info[i]);
961}
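/* Note the last descriptor is only committed, never passed to the device via
 * rocker_desc_head_set(), which keeps one slot unused and preserves the
 * head != tail "not full" convention used by rocker_desc_head_get().
 */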
962
963static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
964 struct rocker_dma_ring_info *info,
965 int direction, size_t buf_size)
966{
967 struct pci_dev *pdev = rocker->pdev;
968 int i;
969 int err;
970
971 for (i = 0; i < info->size; i++) {
972 struct rocker_desc_info *desc_info = &info->desc_info[i];
973 struct rocker_desc *desc = &info->desc[i];
974 dma_addr_t dma_handle;
975 char *buf;
976
977 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
978 if (!buf) {
979 err = -ENOMEM;
980 goto rollback;
981 }
982
983 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
984 if (pci_dma_mapping_error(pdev, dma_handle)) {
985 kfree(buf);
986 err = -EIO;
987 goto rollback;
988 }
989
990 desc_info->data = buf;
991 desc_info->data_size = buf_size;
992 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
993
994 desc->buf_addr = dma_handle;
995 desc->buf_size = buf_size;
996 }
997 return 0;
998
999rollback:
1000 for (i--; i >= 0; i--) {
1001 struct rocker_desc_info *desc_info = &info->desc_info[i];
1002
1003 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1004 desc_info->data_size, direction);
1005 kfree(desc_info->data);
1006 }
1007 return err;
1008}
1009
1010static void rocker_dma_ring_bufs_free(struct rocker *rocker,
1011 struct rocker_dma_ring_info *info,
1012 int direction)
1013{
1014 struct pci_dev *pdev = rocker->pdev;
1015 int i;
1016
1017 for (i = 0; i < info->size; i++) {
1018 struct rocker_desc_info *desc_info = &info->desc_info[i];
1019 struct rocker_desc *desc = &info->desc[i];
1020
1021 desc->buf_addr = 0;
1022 desc->buf_size = 0;
1023 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1024 desc_info->data_size, direction);
1025 kfree(desc_info->data);
1026 }
1027}
1028
1029static int rocker_dma_rings_init(struct rocker *rocker)
1030{
1031 struct pci_dev *pdev = rocker->pdev;
1032 int err;
1033
1034 err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
1035 ROCKER_DMA_CMD_DEFAULT_SIZE,
1036 &rocker->cmd_ring);
1037 if (err) {
1038 dev_err(&pdev->dev, "failed to create command dma ring\n");
1039 return err;
1040 }
1041
1042 spin_lock_init(&rocker->cmd_ring_lock);
1043
1044 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
1045 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
1046 if (err) {
1047 dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
1048 goto err_dma_cmd_ring_bufs_alloc;
1049 }
1050
1051 err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
1052 ROCKER_DMA_EVENT_DEFAULT_SIZE,
1053 &rocker->event_ring);
1054 if (err) {
1055 dev_err(&pdev->dev, "failed to create event dma ring\n");
1056 goto err_dma_event_ring_create;
1057 }
1058
1059 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
1060 PCI_DMA_FROMDEVICE, PAGE_SIZE);
1061 if (err) {
1062 dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
1063 goto err_dma_event_ring_bufs_alloc;
1064 }
1065 rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
1066 return 0;
1067
1068err_dma_event_ring_bufs_alloc:
1069 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1070err_dma_event_ring_create:
1071 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1072 PCI_DMA_BIDIRECTIONAL);
1073err_dma_cmd_ring_bufs_alloc:
1074 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1075 return err;
1076}
1077
1078static void rocker_dma_rings_fini(struct rocker *rocker)
1079{
1080 rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
1081 PCI_DMA_BIDIRECTIONAL);
1082 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1083 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1084 PCI_DMA_BIDIRECTIONAL);
1085 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1086}
1087
1088static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
1089 struct rocker_port *rocker_port,
1090 struct rocker_desc_info *desc_info,
1091 struct sk_buff *skb, size_t buf_len)
1092{
1093 struct pci_dev *pdev = rocker->pdev;
1094 dma_addr_t dma_handle;
1095
1096 dma_handle = pci_map_single(pdev, skb->data, buf_len,
1097 PCI_DMA_FROMDEVICE);
1098 if (pci_dma_mapping_error(pdev, dma_handle))
1099 return -EIO;
1100 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
1101 goto tlv_put_failure;
1102 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
1103 goto tlv_put_failure;
1104 return 0;
1105
1106tlv_put_failure:
1107 pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
1108 desc_info->tlv_size = 0;
1109 return -EMSGSIZE;
1110}
1111
1112static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
1113{
1114 return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1115}
1116
1117static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
1118 struct rocker_port *rocker_port,
1119 struct rocker_desc_info *desc_info)
1120{
1121 struct net_device *dev = rocker_port->dev;
1122 struct sk_buff *skb;
1123 size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1124 int err;
1125
1126 /* Ensure that hw will see tlv_size zero in case of an error.
1127 * That tells hw to use another descriptor.
1128 */
1129 rocker_desc_cookie_ptr_set(desc_info, NULL);
1130 desc_info->tlv_size = 0;
1131
1132 skb = netdev_alloc_skb_ip_align(dev, buf_len);
1133 if (!skb)
1134 return -ENOMEM;
1135 err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
1136 skb, buf_len);
1137 if (err) {
1138 dev_kfree_skb_any(skb);
1139 return err;
1140 }
1141 rocker_desc_cookie_ptr_set(desc_info, skb);
1142 return 0;
1143}
1144
1145static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
1146 struct rocker_tlv **attrs)
1147{
1148 struct pci_dev *pdev = rocker->pdev;
1149 dma_addr_t dma_handle;
1150 size_t len;
1151
1152 if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1153 !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1154 return;
1155 dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1156 len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1157 pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1158}
1159
1160static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
1161 struct rocker_desc_info *desc_info)
1162{
1163 struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
1164 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1165
1166 if (!skb)
1167 return;
1168 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1169 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1170 dev_kfree_skb_any(skb);
1171}
1172
1173static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
1174 struct rocker_port *rocker_port)
1175{
1176 struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1177 int i;
1178 int err;
1179
1180 for (i = 0; i < rx_ring->size; i++) {
1181 err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
1182 &rx_ring->desc_info[i]);
1183 if (err)
1184 goto rollback;
1185 }
1186 return 0;
1187
1188rollback:
1189 for (i--; i >= 0; i--)
1190 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1191 return err;
1192}
1193
1194static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
1195 struct rocker_port *rocker_port)
1196{
1197 struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1198 int i;
1199
1200 for (i = 0; i < rx_ring->size; i++)
1201 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1202}
1203
1204static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1205{
1206 struct rocker *rocker = rocker_port->rocker;
1207 int err;
1208
1209 err = rocker_dma_ring_create(rocker,
1210 ROCKER_DMA_TX(rocker_port->port_number),
1211 ROCKER_DMA_TX_DEFAULT_SIZE,
1212 &rocker_port->tx_ring);
1213 if (err) {
1214 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1215 return err;
1216 }
1217
1218 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1219 PCI_DMA_TODEVICE,
1220 ROCKER_DMA_TX_DESC_SIZE);
1221 if (err) {
1222 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1223 goto err_dma_tx_ring_bufs_alloc;
1224 }
1225
1226 err = rocker_dma_ring_create(rocker,
1227 ROCKER_DMA_RX(rocker_port->port_number),
1228 ROCKER_DMA_RX_DEFAULT_SIZE,
1229 &rocker_port->rx_ring);
1230 if (err) {
1231 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1232 goto err_dma_rx_ring_create;
1233 }
1234
1235 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1236 PCI_DMA_BIDIRECTIONAL,
1237 ROCKER_DMA_RX_DESC_SIZE);
1238 if (err) {
1239 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1240 goto err_dma_rx_ring_bufs_alloc;
1241 }
1242
1243 err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
1244 if (err) {
1245 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1246 goto err_dma_rx_ring_skbs_alloc;
1247 }
1248 rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1249
1250 return 0;
1251
1252err_dma_rx_ring_skbs_alloc:
1253 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1254 PCI_DMA_BIDIRECTIONAL);
1255err_dma_rx_ring_bufs_alloc:
1256 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1257err_dma_rx_ring_create:
1258 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1259 PCI_DMA_TODEVICE);
1260err_dma_tx_ring_bufs_alloc:
1261 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1262 return err;
1263}
1264
1265static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1266{
1267 struct rocker *rocker = rocker_port->rocker;
1268
1269 rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
1270 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1271 PCI_DMA_BIDIRECTIONAL);
1272 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1273 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1274 PCI_DMA_TODEVICE);
1275 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1276}
1277
1278static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
1279{
1280 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1281
1282 if (enable)
David S. Miller71a83a62015-03-03 21:16:48 -05001283 val |= 1ULL << rocker_port->pport;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001284 else
David S. Miller71a83a62015-03-03 21:16:48 -05001285 val &= ~(1ULL << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001286 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1287}
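/* PORT_PHYS_ENABLE is a bitmask with one bit per physical port; pport
 * numbering starts at 1 (note the pport - 1 conversion to port_number in
 * rocker_event_link_change() below), so bit 0 is left unused here.
 */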
1288
1289/********************************
1290 * Interrupt handler and helpers
1291 ********************************/
1292
1293static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1294{
1295 struct rocker *rocker = dev_id;
1296 struct rocker_desc_info *desc_info;
1297 struct rocker_wait *wait;
1298 u32 credits = 0;
1299
1300 spin_lock(&rocker->cmd_ring_lock);
1301 while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1302 wait = rocker_desc_cookie_ptr_get(desc_info);
1303 if (wait->nowait) {
1304 rocker_desc_gen_clear(desc_info);
1305 rocker_wait_destroy(wait);
1306 } else {
1307 rocker_wait_wake_up(wait);
1308 }
1309 credits++;
1310 }
1311 spin_unlock(&rocker->cmd_ring_lock);
1312 rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1313
1314 return IRQ_HANDLED;
1315}
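/* Completed command descriptors are returned to the device by writing the
 * number of processed slots as credits via rocker_dma_ring_credits_set();
 * rocker_event_irq_handler() below applies the same scheme to the event ring.
 */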
1316
1317static void rocker_port_link_up(struct rocker_port *rocker_port)
1318{
1319 netif_carrier_on(rocker_port->dev);
1320 netdev_info(rocker_port->dev, "Link is up\n");
1321}
1322
1323static void rocker_port_link_down(struct rocker_port *rocker_port)
1324{
1325 netif_carrier_off(rocker_port->dev);
1326 netdev_info(rocker_port->dev, "Link is down\n");
1327}
1328
1329static int rocker_event_link_change(struct rocker *rocker,
1330 const struct rocker_tlv *info)
1331{
1332 struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
1333 unsigned int port_number;
1334 bool link_up;
1335 struct rocker_port *rocker_port;
1336
1337 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001338 if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001339 !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1340 return -EIO;
1341 port_number =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001342 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001343 link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1344
1345 if (port_number >= rocker->port_count)
1346 return -EINVAL;
1347
1348 rocker_port = rocker->ports[port_number];
1349 if (netif_carrier_ok(rocker_port->dev) != link_up) {
1350 if (link_up)
1351 rocker_port_link_up(rocker_port);
1352 else
1353 rocker_port_link_down(rocker_port);
1354 }
1355
1356 return 0;
1357}
1358
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001359#define ROCKER_OP_FLAG_REMOVE BIT(0)
1360#define ROCKER_OP_FLAG_NOWAIT BIT(1)
1361#define ROCKER_OP_FLAG_LEARNED BIT(2)
Scott Feldman6c707942014-11-28 14:34:28 +01001362#define ROCKER_OP_FLAG_REFRESH BIT(3)
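/* Flags modifying flow/group/FDB operations: ROCKER_OP_FLAG_NOWAIT issues
 * the command without sleeping for its completion (see rocker_cmd_exec()
 * below) and ROCKER_OP_FLAG_LEARNED marks FDB entries reported by the
 * hardware, as in rocker_event_mac_vlan_seen() below; REMOVE and REFRESH
 * presumably select the delete and refresh variants of the same operations.
 */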
1363
1364static int rocker_port_fdb(struct rocker_port *rocker_port,
1365 const unsigned char *addr,
1366 __be16 vlan_id, int flags);
1367
1368static int rocker_event_mac_vlan_seen(struct rocker *rocker,
1369 const struct rocker_tlv *info)
1370{
1371 struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
1372 unsigned int port_number;
1373 struct rocker_port *rocker_port;
1374 unsigned char *addr;
1375 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
1376 __be16 vlan_id;
1377
1378 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001379 if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
Scott Feldman6c707942014-11-28 14:34:28 +01001380 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1381 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1382 return -EIO;
1383 port_number =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001384 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
Scott Feldman6c707942014-11-28 14:34:28 +01001385 addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
Jiri Pirko9b03c712014-12-03 14:14:53 +01001386 vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
Scott Feldman6c707942014-11-28 14:34:28 +01001387
1388 if (port_number >= rocker->port_count)
1389 return -EINVAL;
1390
1391 rocker_port = rocker->ports[port_number];
1392
1393 if (rocker_port->stp_state != BR_STATE_LEARNING &&
1394 rocker_port->stp_state != BR_STATE_FORWARDING)
1395 return 0;
1396
1397 return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
1398}
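/* Hardware-learned MAC/VLAN "seen" events feed the software FDB: the address
 * is pushed through rocker_port_fdb() with the NOWAIT and LEARNED flags, but
 * only while the port's STP state allows learning or forwarding.
 */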
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001399
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001400static int rocker_event_process(struct rocker *rocker,
1401 struct rocker_desc_info *desc_info)
1402{
1403 struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1404 struct rocker_tlv *info;
1405 u16 type;
1406
1407 rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1408 if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1409 !attrs[ROCKER_TLV_EVENT_INFO])
1410 return -EIO;
1411
1412 type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1413 info = attrs[ROCKER_TLV_EVENT_INFO];
1414
1415 switch (type) {
1416 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1417 return rocker_event_link_change(rocker, info);
Scott Feldman6c707942014-11-28 14:34:28 +01001418 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1419 return rocker_event_mac_vlan_seen(rocker, info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001420 }
1421
1422 return -EOPNOTSUPP;
1423}
1424
1425static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1426{
1427 struct rocker *rocker = dev_id;
1428 struct pci_dev *pdev = rocker->pdev;
1429 struct rocker_desc_info *desc_info;
1430 u32 credits = 0;
1431 int err;
1432
1433 while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1434 err = rocker_desc_err(desc_info);
1435 if (err) {
1436 dev_err(&pdev->dev, "event desc received with err %d\n",
1437 err);
1438 } else {
1439 err = rocker_event_process(rocker, desc_info);
1440 if (err)
1441 dev_err(&pdev->dev, "event processing failed with err %d\n",
1442 err);
1443 }
1444 rocker_desc_gen_clear(desc_info);
1445 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1446 credits++;
1447 }
1448 rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1449
1450 return IRQ_HANDLED;
1451}
1452
1453static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1454{
1455 struct rocker_port *rocker_port = dev_id;
1456
1457 napi_schedule(&rocker_port->napi_tx);
1458 return IRQ_HANDLED;
1459}
1460
1461static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1462{
1463 struct rocker_port *rocker_port = dev_id;
1464
1465 napi_schedule(&rocker_port->napi_rx);
1466 return IRQ_HANDLED;
1467}
1468
1469/********************
1470 * Command interface
1471 ********************/
1472
1473typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
1474 struct rocker_port *rocker_port,
1475 struct rocker_desc_info *desc_info,
1476 void *priv);
1477
1478static int rocker_cmd_exec(struct rocker *rocker,
1479 struct rocker_port *rocker_port,
1480 rocker_cmd_cb_t prepare, void *prepare_priv,
1481 rocker_cmd_cb_t process, void *process_priv,
1482 bool nowait)
1483{
1484 struct rocker_desc_info *desc_info;
1485 struct rocker_wait *wait;
1486 unsigned long flags;
1487 int err;
1488
1489 wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
1490 if (!wait)
1491 return -ENOMEM;
1492 wait->nowait = nowait;
1493
1494 spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
1495 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1496 if (!desc_info) {
1497 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1498 err = -EAGAIN;
1499 goto out;
1500 }
1501 err = prepare(rocker, rocker_port, desc_info, prepare_priv);
1502 if (err) {
1503 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1504 goto out;
1505 }
1506 rocker_desc_cookie_ptr_set(desc_info, wait);
1507 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1508 spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1509
1510 if (nowait)
1511 return 0;
1512
1513 if (!rocker_wait_event_timeout(wait, HZ / 10))
1514 return -EIO;
1515
1516 err = rocker_desc_err(desc_info);
1517 if (err)
1518 return err;
1519
1520 if (process)
1521 err = process(rocker, rocker_port, desc_info, process_priv);
1522
1523 rocker_desc_gen_clear(desc_info);
1524out:
1525 rocker_wait_destroy(wait);
1526 return err;
1527}
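/* Callers pair a "prepare" callback that builds the command TLVs with an
 * optional "process" callback that parses the response from the same
 * descriptor, e.g. rocker_cmd_get_port_settings_ethtool() below passes
 * rocker_cmd_get_port_settings_prep() and
 * rocker_cmd_get_port_settings_ethtool_proc().
 */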
1528
1529static int
1530rocker_cmd_get_port_settings_prep(struct rocker *rocker,
1531 struct rocker_port *rocker_port,
1532 struct rocker_desc_info *desc_info,
1533 void *priv)
1534{
1535 struct rocker_tlv *cmd_info;
1536
1537 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1538 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1539 return -EMSGSIZE;
1540 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1541 if (!cmd_info)
1542 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001543 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1544 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001545 return -EMSGSIZE;
1546 rocker_tlv_nest_end(desc_info, cmd_info);
1547 return 0;
1548}
1549
1550static int
1551rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
1552 struct rocker_port *rocker_port,
1553 struct rocker_desc_info *desc_info,
1554 void *priv)
1555{
1556 struct ethtool_cmd *ecmd = priv;
1557 struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1558 struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1559 u32 speed;
1560 u8 duplex;
1561 u8 autoneg;
1562
1563 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1564 if (!attrs[ROCKER_TLV_CMD_INFO])
1565 return -EIO;
1566
1567 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1568 attrs[ROCKER_TLV_CMD_INFO]);
1569 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1570 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1571 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1572 return -EIO;
1573
1574 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1575 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1576 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1577
1578 ecmd->transceiver = XCVR_INTERNAL;
1579 ecmd->supported = SUPPORTED_TP;
1580 ecmd->phy_address = 0xff;
1581 ecmd->port = PORT_TP;
1582 ethtool_cmd_speed_set(ecmd, speed);
1583 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1584 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1585
1586 return 0;
1587}
1588
1589static int
1590rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
1591 struct rocker_port *rocker_port,
1592 struct rocker_desc_info *desc_info,
1593 void *priv)
1594{
1595 unsigned char *macaddr = priv;
1596 struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1597 struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1598 struct rocker_tlv *attr;
1599
1600 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1601 if (!attrs[ROCKER_TLV_CMD_INFO])
1602 return -EIO;
1603
1604 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1605 attrs[ROCKER_TLV_CMD_INFO]);
1606 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1607 if (!attr)
1608 return -EIO;
1609
1610 if (rocker_tlv_len(attr) != ETH_ALEN)
1611 return -EINVAL;
1612
1613 ether_addr_copy(macaddr, rocker_tlv_data(attr));
1614 return 0;
1615}
1616
1617static int
1618rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
1619 struct rocker_port *rocker_port,
1620 struct rocker_desc_info *desc_info,
1621 void *priv)
1622{
1623 struct ethtool_cmd *ecmd = priv;
1624 struct rocker_tlv *cmd_info;
1625
1626 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1627 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1628 return -EMSGSIZE;
1629 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1630 if (!cmd_info)
1631 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001632 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1633 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001634 return -EMSGSIZE;
1635 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1636 ethtool_cmd_speed(ecmd)))
1637 return -EMSGSIZE;
1638 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1639 ecmd->duplex))
1640 return -EMSGSIZE;
1641 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1642 ecmd->autoneg))
1643 return -EMSGSIZE;
1644 rocker_tlv_nest_end(desc_info, cmd_info);
1645 return 0;
1646}
1647
1648static int
1649rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
1650 struct rocker_port *rocker_port,
1651 struct rocker_desc_info *desc_info,
1652 void *priv)
1653{
1654 unsigned char *macaddr = priv;
1655 struct rocker_tlv *cmd_info;
1656
1657 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1658 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1659 return -EMSGSIZE;
1660 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1661 if (!cmd_info)
1662 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001663 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1664 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001665 return -EMSGSIZE;
1666 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1667 ETH_ALEN, macaddr))
1668 return -EMSGSIZE;
1669 rocker_tlv_nest_end(desc_info, cmd_info);
1670 return 0;
1671}
1672
Scott Feldman5111f802014-11-28 14:34:30 +01001673static int
1674rocker_cmd_set_port_learning_prep(struct rocker *rocker,
1675 struct rocker_port *rocker_port,
1676 struct rocker_desc_info *desc_info,
1677 void *priv)
1678{
1679 struct rocker_tlv *cmd_info;
1680
1681 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1682 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1683 return -EMSGSIZE;
1684 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1685 if (!cmd_info)
1686 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001687 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1688 rocker_port->pport))
Scott Feldman5111f802014-11-28 14:34:30 +01001689 return -EMSGSIZE;
1690 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1691 !!(rocker_port->brport_flags & BR_LEARNING)))
1692 return -EMSGSIZE;
1693 rocker_tlv_nest_end(desc_info, cmd_info);
1694 return 0;
1695}
1696
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001697static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1698 struct ethtool_cmd *ecmd)
1699{
1700 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1701 rocker_cmd_get_port_settings_prep, NULL,
1702 rocker_cmd_get_port_settings_ethtool_proc,
1703 ecmd, false);
1704}
1705
1706static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1707 unsigned char *macaddr)
1708{
1709 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1710 rocker_cmd_get_port_settings_prep, NULL,
1711 rocker_cmd_get_port_settings_macaddr_proc,
1712 macaddr, false);
1713}
1714
1715static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1716 struct ethtool_cmd *ecmd)
1717{
1718 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1719 rocker_cmd_set_port_settings_ethtool_prep,
1720 ecmd, NULL, NULL, false);
1721}
1722
1723static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1724 unsigned char *macaddr)
1725{
1726 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1727 rocker_cmd_set_port_settings_macaddr_prep,
1728 macaddr, NULL, NULL, false);
1729}
1730
Scott Feldman5111f802014-11-28 14:34:30 +01001731static int rocker_port_set_learning(struct rocker_port *rocker_port)
1732{
1733 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1734 rocker_cmd_set_port_learning_prep,
1735 NULL, NULL, NULL, false);
1736}
1737
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001738static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1739 struct rocker_flow_tbl_entry *entry)
1740{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001741 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1742 entry->key.ig_port.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001743 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001744 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1745 entry->key.ig_port.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001746 return -EMSGSIZE;
1747 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1748 entry->key.ig_port.goto_tbl))
1749 return -EMSGSIZE;
1750
1751 return 0;
1752}
1753
1754static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1755 struct rocker_flow_tbl_entry *entry)
1756{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001757 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1758 entry->key.vlan.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001759 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001760 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1761 entry->key.vlan.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001762 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001763 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1764 entry->key.vlan.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001765 return -EMSGSIZE;
1766 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1767 entry->key.vlan.goto_tbl))
1768 return -EMSGSIZE;
1769 if (entry->key.vlan.untagged &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01001770 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1771 entry->key.vlan.new_vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001772 return -EMSGSIZE;
1773
1774 return 0;
1775}
1776
1777static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1778 struct rocker_flow_tbl_entry *entry)
1779{
1780	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1781			       entry->key.term_mac.in_pport))
1782		return -EMSGSIZE;
1783	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1784			       entry->key.term_mac.in_pport_mask))
1785		return -EMSGSIZE;
1786	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1787				entry->key.term_mac.eth_type))
1788		return -EMSGSIZE;
1789	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1790			   ETH_ALEN, entry->key.term_mac.eth_dst))
1791		return -EMSGSIZE;
1792	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1793			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1794		return -EMSGSIZE;
1795	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1796				entry->key.term_mac.vlan_id))
1797		return -EMSGSIZE;
1798	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1799				entry->key.term_mac.vlan_id_mask))
1800		return -EMSGSIZE;
1801 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1802 entry->key.term_mac.goto_tbl))
1803 return -EMSGSIZE;
1804 if (entry->key.term_mac.copy_to_cpu &&
1805 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1806 entry->key.term_mac.copy_to_cpu))
1807 return -EMSGSIZE;
1808
1809 return 0;
1810}
1811
1812static int
1813rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
1814 struct rocker_flow_tbl_entry *entry)
1815{
1816	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1817				entry->key.ucast_routing.eth_type))
1818		return -EMSGSIZE;
1819	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
1820				entry->key.ucast_routing.dst4))
1821		return -EMSGSIZE;
1822	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
1823				entry->key.ucast_routing.dst4_mask))
1824		return -EMSGSIZE;
1825 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1826 entry->key.ucast_routing.goto_tbl))
1827 return -EMSGSIZE;
1828 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1829 entry->key.ucast_routing.group_id))
1830 return -EMSGSIZE;
1831
1832 return 0;
1833}
1834
1835static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
1836 struct rocker_flow_tbl_entry *entry)
1837{
1838 if (entry->key.bridge.has_eth_dst &&
1839 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1840 ETH_ALEN, entry->key.bridge.eth_dst))
1841 return -EMSGSIZE;
1842 if (entry->key.bridge.has_eth_dst_mask &&
1843 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1844 ETH_ALEN, entry->key.bridge.eth_dst_mask))
1845 return -EMSGSIZE;
1846 if (entry->key.bridge.vlan_id &&
1847	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1848				entry->key.bridge.vlan_id))
1849		return -EMSGSIZE;
1850 if (entry->key.bridge.tunnel_id &&
1851 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
1852 entry->key.bridge.tunnel_id))
1853 return -EMSGSIZE;
1854 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1855 entry->key.bridge.goto_tbl))
1856 return -EMSGSIZE;
1857 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1858 entry->key.bridge.group_id))
1859 return -EMSGSIZE;
1860 if (entry->key.bridge.copy_to_cpu &&
1861 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1862 entry->key.bridge.copy_to_cpu))
1863 return -EMSGSIZE;
1864
1865 return 0;
1866}
1867
1868static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
1869 struct rocker_flow_tbl_entry *entry)
1870{
1871	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1872			       entry->key.acl.in_pport))
1873		return -EMSGSIZE;
1874	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1875			       entry->key.acl.in_pport_mask))
1876		return -EMSGSIZE;
1877 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
1878 ETH_ALEN, entry->key.acl.eth_src))
1879 return -EMSGSIZE;
1880 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
1881 ETH_ALEN, entry->key.acl.eth_src_mask))
1882 return -EMSGSIZE;
1883 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1884 ETH_ALEN, entry->key.acl.eth_dst))
1885 return -EMSGSIZE;
1886 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1887 ETH_ALEN, entry->key.acl.eth_dst_mask))
1888 return -EMSGSIZE;
1889	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1890				entry->key.acl.eth_type))
1891		return -EMSGSIZE;
1892	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1893				entry->key.acl.vlan_id))
1894		return -EMSGSIZE;
1895	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1896				entry->key.acl.vlan_id_mask))
1897		return -EMSGSIZE;
1898
1899 switch (ntohs(entry->key.acl.eth_type)) {
1900 case ETH_P_IP:
1901 case ETH_P_IPV6:
1902 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
1903 entry->key.acl.ip_proto))
1904 return -EMSGSIZE;
1905 if (rocker_tlv_put_u8(desc_info,
1906 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
1907 entry->key.acl.ip_proto_mask))
1908 return -EMSGSIZE;
1909 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
1910 entry->key.acl.ip_tos & 0x3f))
1911 return -EMSGSIZE;
1912 if (rocker_tlv_put_u8(desc_info,
1913 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
1914 entry->key.acl.ip_tos_mask & 0x3f))
1915 return -EMSGSIZE;
1916 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
1917 (entry->key.acl.ip_tos & 0xc0) >> 6))
1918 return -EMSGSIZE;
1919 if (rocker_tlv_put_u8(desc_info,
1920 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
1921 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
1922 return -EMSGSIZE;
1923 break;
1924 }
1925
1926 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
1927 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1928 entry->key.acl.group_id))
1929 return -EMSGSIZE;
1930
1931 return 0;
1932}
1933
1934static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
1935 struct rocker_port *rocker_port,
1936 struct rocker_desc_info *desc_info,
1937 void *priv)
1938{
1939 struct rocker_flow_tbl_entry *entry = priv;
1940 struct rocker_tlv *cmd_info;
1941 int err = 0;
1942
1943 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1944 ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD))
1945 return -EMSGSIZE;
1946 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1947 if (!cmd_info)
1948 return -EMSGSIZE;
1949 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
1950 entry->key.tbl_id))
1951 return -EMSGSIZE;
1952 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
1953 entry->key.priority))
1954 return -EMSGSIZE;
1955 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
1956 return -EMSGSIZE;
1957 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
1958 entry->cookie))
1959 return -EMSGSIZE;
1960
1961 switch (entry->key.tbl_id) {
1962 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
1963 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
1964 break;
1965 case ROCKER_OF_DPA_TABLE_ID_VLAN:
1966 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
1967 break;
1968 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
1969 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
1970 break;
1971 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
1972 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
1973 break;
1974 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
1975 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
1976 break;
1977 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
1978 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
1979 break;
1980 default:
1981 err = -ENOTSUPP;
1982 break;
1983 }
1984
1985 if (err)
1986 return err;
1987
1988 rocker_tlv_nest_end(desc_info, cmd_info);
1989
1990 return 0;
1991}
1992
1993static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
1994 struct rocker_port *rocker_port,
1995 struct rocker_desc_info *desc_info,
1996 void *priv)
1997{
1998 const struct rocker_flow_tbl_entry *entry = priv;
1999 struct rocker_tlv *cmd_info;
2000
2001 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
2002 ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
2003 return -EMSGSIZE;
2004 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2005 if (!cmd_info)
2006 return -EMSGSIZE;
2007 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2008 entry->cookie))
2009 return -EMSGSIZE;
2010 rocker_tlv_nest_end(desc_info, cmd_info);
2011
2012 return 0;
2013}
2014
2015static int
2016rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2017 struct rocker_group_tbl_entry *entry)
2018{
2019	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2020			       ROCKER_GROUP_PORT_GET(entry->group_id)))
2021 return -EMSGSIZE;
2022 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2023 entry->l2_interface.pop_vlan))
2024 return -EMSGSIZE;
2025
2026 return 0;
2027}
2028
2029static int
2030rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2031 struct rocker_group_tbl_entry *entry)
2032{
2033 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2034 entry->l2_rewrite.group_id))
2035 return -EMSGSIZE;
2036 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2037 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2038 ETH_ALEN, entry->l2_rewrite.eth_src))
2039 return -EMSGSIZE;
2040 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2041 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2042 ETH_ALEN, entry->l2_rewrite.eth_dst))
2043 return -EMSGSIZE;
2044 if (entry->l2_rewrite.vlan_id &&
2045	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2046				entry->l2_rewrite.vlan_id))
2047		return -EMSGSIZE;
2048
2049 return 0;
2050}
2051
2052static int
2053rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2054 struct rocker_group_tbl_entry *entry)
2055{
2056 int i;
2057 struct rocker_tlv *group_ids;
2058
2059 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2060 entry->group_count))
2061 return -EMSGSIZE;
2062
2063 group_ids = rocker_tlv_nest_start(desc_info,
2064 ROCKER_TLV_OF_DPA_GROUP_IDS);
2065 if (!group_ids)
2066 return -EMSGSIZE;
2067
2068 for (i = 0; i < entry->group_count; i++)
2069 /* Note TLV array is 1-based */
2070 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2071 return -EMSGSIZE;
2072
2073 rocker_tlv_nest_end(desc_info, group_ids);
2074
2075 return 0;
2076}
2077
2078static int
2079rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2080 struct rocker_group_tbl_entry *entry)
2081{
2082 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2083 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2084 ETH_ALEN, entry->l3_unicast.eth_src))
2085 return -EMSGSIZE;
2086 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2087 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2088 ETH_ALEN, entry->l3_unicast.eth_dst))
2089 return -EMSGSIZE;
2090 if (entry->l3_unicast.vlan_id &&
2091	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2092				entry->l3_unicast.vlan_id))
2093		return -EMSGSIZE;
2094 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2095 entry->l3_unicast.ttl_check))
2096 return -EMSGSIZE;
2097 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2098 entry->l3_unicast.group_id))
2099 return -EMSGSIZE;
2100
2101 return 0;
2102}
2103
2104static int rocker_cmd_group_tbl_add(struct rocker *rocker,
2105 struct rocker_port *rocker_port,
2106 struct rocker_desc_info *desc_info,
2107 void *priv)
2108{
2109 struct rocker_group_tbl_entry *entry = priv;
2110 struct rocker_tlv *cmd_info;
2111 int err = 0;
2112
2113 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2114 return -EMSGSIZE;
2115 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2116 if (!cmd_info)
2117 return -EMSGSIZE;
2118
2119 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2120 entry->group_id))
2121 return -EMSGSIZE;
2122
2123 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2124 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2125 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2126 break;
2127 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2128 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2129 break;
2130 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2131 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2132 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2133 break;
2134 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2135 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2136 break;
2137 default:
2138 err = -ENOTSUPP;
2139 break;
2140 }
2141
2142 if (err)
2143 return err;
2144
2145 rocker_tlv_nest_end(desc_info, cmd_info);
2146
2147 return 0;
2148}
2149
2150static int rocker_cmd_group_tbl_del(struct rocker *rocker,
2151 struct rocker_port *rocker_port,
2152 struct rocker_desc_info *desc_info,
2153 void *priv)
2154{
2155 const struct rocker_group_tbl_entry *entry = priv;
2156 struct rocker_tlv *cmd_info;
2157
2158 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2159 return -EMSGSIZE;
2160 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2161 if (!cmd_info)
2162 return -EMSGSIZE;
2163 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2164 entry->group_id))
2165 return -EMSGSIZE;
2166 rocker_tlv_nest_end(desc_info, cmd_info);
2167
2168 return 0;
2169}
2170
2171/*****************************************
2172 * Flow, group, FDB, internal VLAN tables
2173 *****************************************/
2174
2175static int rocker_init_tbls(struct rocker *rocker)
2176{
2177 hash_init(rocker->flow_tbl);
2178 spin_lock_init(&rocker->flow_tbl_lock);
2179
2180 hash_init(rocker->group_tbl);
2181 spin_lock_init(&rocker->group_tbl_lock);
2182
2183 hash_init(rocker->fdb_tbl);
2184 spin_lock_init(&rocker->fdb_tbl_lock);
2185
2186 hash_init(rocker->internal_vlan_tbl);
2187 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2188
2189 return 0;
2190}
2191
2192static void rocker_free_tbls(struct rocker *rocker)
2193{
2194 unsigned long flags;
2195 struct rocker_flow_tbl_entry *flow_entry;
2196 struct rocker_group_tbl_entry *group_entry;
2197 struct rocker_fdb_tbl_entry *fdb_entry;
2198 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2199 struct hlist_node *tmp;
2200 int bkt;
2201
2202 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2203 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2204 hash_del(&flow_entry->entry);
2205 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2206
2207 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2208 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2209 hash_del(&group_entry->entry);
2210 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2211
2212 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2213 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2214 hash_del(&fdb_entry->entry);
2215 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2216
2217 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2218 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2219 tmp, internal_vlan_entry, entry)
2220 hash_del(&internal_vlan_entry->entry);
2221 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2222}
2223
2224static struct rocker_flow_tbl_entry *
2225rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
2226{
2227 struct rocker_flow_tbl_entry *found;
2228
2229 hash_for_each_possible(rocker->flow_tbl, found,
2230 entry, match->key_crc32) {
2231 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
2232 return found;
2233 }
2234
2235 return NULL;
2236}
2237
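/* Flow table entries are reference counted in software: rocker_flow_tbl_add()
 * reuses an existing entry when an identical key is already present and only
 * pushes a new flow (with a fresh cookie) to the device on the first
 * reference; rocker_flow_tbl_del() removes the hardware flow only when the
 * last reference goes away.
 */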
2238static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2239 struct rocker_flow_tbl_entry *match,
2240 bool nowait)
2241{
2242 struct rocker *rocker = rocker_port->rocker;
2243 struct rocker_flow_tbl_entry *found;
2244 unsigned long flags;
2245 bool add_to_hw = false;
2246 int err = 0;
2247
2248 match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
2249
2250 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2251
2252 found = rocker_flow_tbl_find(rocker, match);
2253
2254 if (found) {
2255 kfree(match);
2256 } else {
2257 found = match;
2258 found->cookie = rocker->flow_tbl_next_cookie++;
2259 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2260 add_to_hw = true;
2261 }
2262
2263 found->ref_count++;
2264
2265 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2266
2267 if (add_to_hw) {
2268 err = rocker_cmd_exec(rocker, rocker_port,
2269 rocker_cmd_flow_tbl_add,
2270 found, NULL, NULL, nowait);
2271 if (err) {
2272 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2273 hash_del(&found->entry);
2274 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2275 kfree(found);
2276 }
2277 }
2278
2279 return err;
2280}
2281
2282static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2283 struct rocker_flow_tbl_entry *match,
2284 bool nowait)
2285{
2286 struct rocker *rocker = rocker_port->rocker;
2287 struct rocker_flow_tbl_entry *found;
2288 unsigned long flags;
2289 bool del_from_hw = false;
2290 int err = 0;
2291
2292 match->key_crc32 = crc32(~0, &match->key, sizeof(match->key));
2293
2294 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2295
2296 found = rocker_flow_tbl_find(rocker, match);
2297
2298 if (found) {
2299 found->ref_count--;
2300 if (found->ref_count == 0) {
2301 hash_del(&found->entry);
2302 del_from_hw = true;
2303 }
2304 }
2305
2306 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2307
2308 kfree(match);
2309
2310 if (del_from_hw) {
2311 err = rocker_cmd_exec(rocker, rocker_port,
2312 rocker_cmd_flow_tbl_del,
2313 found, NULL, NULL, nowait);
2314 kfree(found);
2315 }
2316
2317 return err;
2318}
2319
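/* ROCKER_OP_FLAG_NOWAIT is also taken as a hint that the caller may not be
 * able to sleep, so allocations made on its behalf use GFP_ATOMIC; all other
 * operations are assumed to run in process context and can use GFP_KERNEL.
 */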
2320static gfp_t rocker_op_flags_gfp(int flags)
2321{
2322 return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
2323}
2324
2325static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2326 int flags, struct rocker_flow_tbl_entry *entry)
2327{
2328 bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2329
2330 if (flags & ROCKER_OP_FLAG_REMOVE)
2331 return rocker_flow_tbl_del(rocker_port, entry, nowait);
2332 else
2333 return rocker_flow_tbl_add(rocker_port, entry, nowait);
2334}
2335
2336static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2337				   int flags, u32 in_pport, u32 in_pport_mask,
2338				   enum rocker_of_dpa_table_id goto_tbl)
2339{
2340 struct rocker_flow_tbl_entry *entry;
2341
2342 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2343 if (!entry)
2344 return -ENOMEM;
2345
2346 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2347 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2348	entry->key.ig_port.in_pport = in_pport;
2349	entry->key.ig_port.in_pport_mask = in_pport_mask;
2350	entry->key.ig_port.goto_tbl = goto_tbl;
2351
2352 return rocker_flow_tbl_do(rocker_port, flags, entry);
2353}
2354
2355static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2356				int flags, u32 in_pport,
2357				__be16 vlan_id, __be16 vlan_id_mask,
2358 enum rocker_of_dpa_table_id goto_tbl,
2359 bool untagged, __be16 new_vlan_id)
2360{
2361 struct rocker_flow_tbl_entry *entry;
2362
2363 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2364 if (!entry)
2365 return -ENOMEM;
2366
2367 entry->key.priority = ROCKER_PRIORITY_VLAN;
2368 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2369	entry->key.vlan.in_pport = in_pport;
2370	entry->key.vlan.vlan_id = vlan_id;
2371 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2372 entry->key.vlan.goto_tbl = goto_tbl;
2373
2374 entry->key.vlan.untagged = untagged;
2375 entry->key.vlan.new_vlan_id = new_vlan_id;
2376
2377 return rocker_flow_tbl_do(rocker_port, flags, entry);
2378}
2379
2380static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2381				    u32 in_pport, u32 in_pport_mask,
2382				    __be16 eth_type, const u8 *eth_dst,
2383 const u8 *eth_dst_mask, __be16 vlan_id,
2384 __be16 vlan_id_mask, bool copy_to_cpu,
2385 int flags)
2386{
2387 struct rocker_flow_tbl_entry *entry;
2388
2389 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2390 if (!entry)
2391 return -ENOMEM;
2392
2393 if (is_multicast_ether_addr(eth_dst)) {
2394 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2395 entry->key.term_mac.goto_tbl =
2396 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2397 } else {
2398 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2399 entry->key.term_mac.goto_tbl =
2400 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2401 }
2402
2403 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2404	entry->key.term_mac.in_pport = in_pport;
2405	entry->key.term_mac.in_pport_mask = in_pport_mask;
2406	entry->key.term_mac.eth_type = eth_type;
2407 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2408 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2409 entry->key.term_mac.vlan_id = vlan_id;
2410 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2411 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2412
2413 return rocker_flow_tbl_do(rocker_port, flags, entry);
2414}
2415
2416static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2417 int flags,
2418 const u8 *eth_dst, const u8 *eth_dst_mask,
2419 __be16 vlan_id, u32 tunnel_id,
2420 enum rocker_of_dpa_table_id goto_tbl,
2421 u32 group_id, bool copy_to_cpu)
2422{
2423 struct rocker_flow_tbl_entry *entry;
2424 u32 priority;
2425 bool vlan_bridging = !!vlan_id;
2426 bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2427 bool wild = false;
2428
2429 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2430 if (!entry)
2431 return -ENOMEM;
2432
2433 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2434
2435 if (eth_dst) {
2436 entry->key.bridge.has_eth_dst = 1;
2437 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2438 }
2439 if (eth_dst_mask) {
2440 entry->key.bridge.has_eth_dst_mask = 1;
2441 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2442 if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
2443 wild = true;
2444 }
2445
2446 priority = ROCKER_PRIORITY_UNKNOWN;
2447	if (vlan_bridging && dflt && wild)
2448		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2449	else if (vlan_bridging && dflt && !wild)
2450		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2451	else if (vlan_bridging && !dflt)
2452		priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2453	else if (!vlan_bridging && dflt && wild)
2454		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2455	else if (!vlan_bridging && dflt && !wild)
2456		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2457	else if (!vlan_bridging && !dflt)
2458		priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2459
2460 entry->key.priority = priority;
2461 entry->key.bridge.vlan_id = vlan_id;
2462 entry->key.bridge.tunnel_id = tunnel_id;
2463 entry->key.bridge.goto_tbl = goto_tbl;
2464 entry->key.bridge.group_id = group_id;
2465 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2466
2467 return rocker_flow_tbl_do(rocker_port, flags, entry);
2468}
2469
2470static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2471			       int flags, u32 in_pport,
2472			       u32 in_pport_mask,
2473			       const u8 *eth_src, const u8 *eth_src_mask,
2474 const u8 *eth_dst, const u8 *eth_dst_mask,
2475 __be16 eth_type,
2476 __be16 vlan_id, __be16 vlan_id_mask,
2477 u8 ip_proto, u8 ip_proto_mask,
2478 u8 ip_tos, u8 ip_tos_mask,
2479 u32 group_id)
2480{
2481 u32 priority;
2482 struct rocker_flow_tbl_entry *entry;
2483
2484 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2485 if (!entry)
2486 return -ENOMEM;
2487
2488 priority = ROCKER_PRIORITY_ACL_NORMAL;
2489 if (eth_dst && eth_dst_mask) {
2490 if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
2491 priority = ROCKER_PRIORITY_ACL_DFLT;
2492 else if (is_link_local_ether_addr(eth_dst))
2493 priority = ROCKER_PRIORITY_ACL_CTRL;
2494 }
2495
2496 entry->key.priority = priority;
2497 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2498	entry->key.acl.in_pport = in_pport;
2499	entry->key.acl.in_pport_mask = in_pport_mask;
2500
2501 if (eth_src)
2502 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2503 if (eth_src_mask)
2504 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2505 if (eth_dst)
2506 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2507 if (eth_dst_mask)
2508 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2509
2510 entry->key.acl.eth_type = eth_type;
2511 entry->key.acl.vlan_id = vlan_id;
2512 entry->key.acl.vlan_id_mask = vlan_id_mask;
2513 entry->key.acl.ip_proto = ip_proto;
2514 entry->key.acl.ip_proto_mask = ip_proto_mask;
2515 entry->key.acl.ip_tos = ip_tos;
2516 entry->key.acl.ip_tos_mask = ip_tos_mask;
2517 entry->key.acl.group_id = group_id;
2518
2519 return rocker_flow_tbl_do(rocker_port, flags, entry);
2520}
2521
2522static struct rocker_group_tbl_entry *
2523rocker_group_tbl_find(struct rocker *rocker,
2524 struct rocker_group_tbl_entry *match)
2525{
2526 struct rocker_group_tbl_entry *found;
2527
2528 hash_for_each_possible(rocker->group_tbl, found,
2529 entry, match->group_id) {
2530 if (found->group_id == match->group_id)
2531 return found;
2532 }
2533
2534 return NULL;
2535}
2536
2537static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
2538{
2539 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2540 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2541 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2542 kfree(entry->group_ids);
2543 break;
2544 default:
2545 break;
2546 }
2547 kfree(entry);
2548}
2549
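/* Adding a group that already exists is treated as a modify: the stale
 * software entry is replaced and the command sent to the device switches
 * from GROUP_ADD to GROUP_MOD so the hardware entry is updated in place.
 */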
2550static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2551 struct rocker_group_tbl_entry *match,
2552 bool nowait)
2553{
2554 struct rocker *rocker = rocker_port->rocker;
2555 struct rocker_group_tbl_entry *found;
2556 unsigned long flags;
2557 int err = 0;
2558
2559 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2560
2561 found = rocker_group_tbl_find(rocker, match);
2562
2563 if (found) {
2564 hash_del(&found->entry);
2565 rocker_group_tbl_entry_free(found);
2566 found = match;
2567 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2568 } else {
2569 found = match;
2570 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2571 }
2572
2573 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2574
2575 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2576
2577 if (found->cmd)
2578 err = rocker_cmd_exec(rocker, rocker_port,
2579 rocker_cmd_group_tbl_add,
2580 found, NULL, NULL, nowait);
2581
2582 return err;
2583}
2584
2585static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2586 struct rocker_group_tbl_entry *match,
2587 bool nowait)
2588{
2589 struct rocker *rocker = rocker_port->rocker;
2590 struct rocker_group_tbl_entry *found;
2591 unsigned long flags;
2592 int err = 0;
2593
2594 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2595
2596 found = rocker_group_tbl_find(rocker, match);
2597
2598 if (found) {
2599 hash_del(&found->entry);
2600 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2601 }
2602
2603 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2604
2605 rocker_group_tbl_entry_free(match);
2606
2607 if (found) {
2608 err = rocker_cmd_exec(rocker, rocker_port,
2609 rocker_cmd_group_tbl_del,
2610 found, NULL, NULL, nowait);
2611 rocker_group_tbl_entry_free(found);
2612 }
2613
2614 return err;
2615}
2616
2617static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2618 int flags, struct rocker_group_tbl_entry *entry)
2619{
2620 bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2621
2622 if (flags & ROCKER_OP_FLAG_REMOVE)
2623 return rocker_group_tbl_del(rocker_port, entry, nowait);
2624 else
2625 return rocker_group_tbl_add(rocker_port, entry, nowait);
2626}
2627
2628static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2629 int flags, __be16 vlan_id,
2630				     u32 out_pport, int pop_vlan)
2631{
2632 struct rocker_group_tbl_entry *entry;
2633
2634 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2635 if (!entry)
2636 return -ENOMEM;
2637
2638	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2639	entry->l2_interface.pop_vlan = pop_vlan;
2640
2641 return rocker_group_tbl_do(rocker_port, flags, entry);
2642}
2643
2644static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2645 int flags, u8 group_count,
2646 u32 *group_ids, u32 group_id)
2647{
2648 struct rocker_group_tbl_entry *entry;
2649
2650 entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2651 if (!entry)
2652 return -ENOMEM;
2653
2654 entry->group_id = group_id;
2655 entry->group_count = group_count;
2656
2657 entry->group_ids = kcalloc(group_count, sizeof(u32),
2658 rocker_op_flags_gfp(flags));
2659 if (!entry->group_ids) {
2660 kfree(entry);
2661 return -ENOMEM;
2662 }
2663 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2664
2665 return rocker_group_tbl_do(rocker_port, flags, entry);
2666}
2667
2668static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2669 int flags, __be16 vlan_id,
2670 u8 group_count, u32 *group_ids,
2671 u32 group_id)
2672{
2673 return rocker_group_l2_fan_out(rocker_port, flags,
2674 group_count, group_ids,
2675 group_id);
2676}
2677
2678static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
2679 int flags, __be16 vlan_id)
2680{
2681 struct rocker_port *p;
2682 struct rocker *rocker = rocker_port->rocker;
2683 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
2684 u32 group_ids[rocker->port_count];
2685 u8 group_count = 0;
2686 int err;
2687 int i;
2688
2689 /* Adjust the flood group for this VLAN. The flood group
2690 * references an L2 interface group for each port in this
2691 * VLAN.
2692 */
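	/* group_ids[] below ends up holding one L2 interface group ID per
	 * bridged member port that carries this VLAN; that list then forms
	 * the members of the L2 flood group programmed via
	 * rocker_group_l2_flood().
	 */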
2693
2694 for (i = 0; i < rocker->port_count; i++) {
2695 p = rocker->ports[i];
2696 if (!rocker_port_is_bridged(p))
2697 continue;
2698 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
2699 group_ids[group_count++] =
2700				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
2701		}
2702 }
2703
2704 /* If there are no bridged ports in this VLAN, we're done */
2705 if (group_count == 0)
2706 return 0;
2707
2708 err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
2709 group_count, group_ids,
2710 group_id);
2711 if (err)
2712 netdev_err(rocker_port->dev,
2713 "Error (%d) port VLAN l2 flood group\n", err);
2714
2715 return err;
2716}
2717
2718static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
2719 int flags, __be16 vlan_id,
2720 bool pop_vlan)
2721{
2722 struct rocker *rocker = rocker_port->rocker;
2723 struct rocker_port *p;
2724 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2725	u32 out_pport;
2726	int ref = 0;
2727 int err;
2728 int i;
2729
2730 /* An L2 interface group for this port in this VLAN, but
2731 * only when port STP state is LEARNING|FORWARDING.
2732 */
2733
2734 if (rocker_port->stp_state == BR_STATE_LEARNING ||
2735 rocker_port->stp_state == BR_STATE_FORWARDING) {
2736		out_pport = rocker_port->pport;
2737		err = rocker_group_l2_interface(rocker_port, flags,
2738						vlan_id, out_pport,
2739						pop_vlan);
2740		if (err) {
2741			netdev_err(rocker_port->dev,
2742				   "Error (%d) port VLAN l2 group for pport %d\n",
2743				   err, out_pport);
2744			return err;
2745 }
2746 }
2747
2748 /* An L2 interface group for this VLAN to CPU port.
2749 * Add when first port joins this VLAN and destroy when
2750 * last port leaves this VLAN.
2751 */
2752
2753 for (i = 0; i < rocker->port_count; i++) {
2754 p = rocker->ports[i];
2755 if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
2756 ref++;
2757 }
2758
2759 if ((!adding || ref != 1) && (adding || ref != 0))
2760 return 0;
2761
2762	out_pport = 0;
2763	err = rocker_group_l2_interface(rocker_port, flags,
2764					vlan_id, out_pport,
2765					pop_vlan);
2766 if (err) {
2767 netdev_err(rocker_port->dev,
2768 "Error (%d) port VLAN l2 group for CPU port\n", err);
2769 return err;
2770 }
2771
2772 return 0;
2773}
2774
2775static struct rocker_ctrl {
2776	const u8 *eth_dst;
2777	const u8 *eth_dst_mask;
2778	__be16 eth_type;
2779	bool acl;
2780 bool bridge;
2781 bool term;
2782 bool copy_to_cpu;
2783} rocker_ctrls[] = {
2784 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
2785 /* pass link local multicast pkts up to CPU for filtering */
2786 .eth_dst = ll_mac,
2787 .eth_dst_mask = ll_mask,
2788 .acl = true,
2789 },
2790 [ROCKER_CTRL_LOCAL_ARP] = {
2791 /* pass local ARP pkts up to CPU */
2792 .eth_dst = zero_mac,
2793 .eth_dst_mask = zero_mac,
2794 .eth_type = htons(ETH_P_ARP),
2795 .acl = true,
2796 },
2797 [ROCKER_CTRL_IPV4_MCAST] = {
2798 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
2799 .eth_dst = ipv4_mcast,
2800 .eth_dst_mask = ipv4_mask,
2801 .eth_type = htons(ETH_P_IP),
2802 .term = true,
2803 .copy_to_cpu = true,
2804 },
2805 [ROCKER_CTRL_IPV6_MCAST] = {
2806 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
2807 .eth_dst = ipv6_mcast,
2808 .eth_dst_mask = ipv6_mask,
2809 .eth_type = htons(ETH_P_IPV6),
2810 .term = true,
2811 .copy_to_cpu = true,
2812 },
2813 [ROCKER_CTRL_DFLT_BRIDGING] = {
2814 /* flood any pkts on vlan */
2815 .bridge = true,
2816 .copy_to_cpu = true,
2817 },
2818};
2819
2820static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
2821 int flags, struct rocker_ctrl *ctrl,
2822 __be16 vlan_id)
2823{
2824	u32 in_pport = rocker_port->pport;
2825	u32 in_pport_mask = 0xffffffff;
2826	u32 out_pport = 0;
2827	u8 *eth_src = NULL;
2828 u8 *eth_src_mask = NULL;
2829 __be16 vlan_id_mask = htons(0xffff);
2830 u8 ip_proto = 0;
2831 u8 ip_proto_mask = 0;
2832 u8 ip_tos = 0;
2833 u8 ip_tos_mask = 0;
2834	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2835	int err;
2836
2837 err = rocker_flow_tbl_acl(rocker_port, flags,
2838				  in_pport, in_pport_mask,
2839				  eth_src, eth_src_mask,
2840 ctrl->eth_dst, ctrl->eth_dst_mask,
2841 ctrl->eth_type,
2842 vlan_id, vlan_id_mask,
2843 ip_proto, ip_proto_mask,
2844 ip_tos, ip_tos_mask,
2845 group_id);
2846
2847 if (err)
2848 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
2849
2850 return err;
2851}
2852
2853static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
2854 int flags, struct rocker_ctrl *ctrl,
2855 __be16 vlan_id)
2856{
2857 enum rocker_of_dpa_table_id goto_tbl =
2858 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2859 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
2860 u32 tunnel_id = 0;
2861 int err;
2862
2863 if (!rocker_port_is_bridged(rocker_port))
2864 return 0;
2865
2866 err = rocker_flow_tbl_bridge(rocker_port, flags,
2867 ctrl->eth_dst, ctrl->eth_dst_mask,
2868 vlan_id, tunnel_id,
2869 goto_tbl, group_id, ctrl->copy_to_cpu);
2870
2871 if (err)
2872 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
2873
2874 return err;
2875}
2876
2877static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
2878 int flags, struct rocker_ctrl *ctrl,
2879 __be16 vlan_id)
2880{
2881	u32 in_pport_mask = 0xffffffff;
2882	__be16 vlan_id_mask = htons(0xffff);
2883 int err;
2884
2885 if (ntohs(vlan_id) == 0)
2886 vlan_id = rocker_port->internal_vlan_id;
2887
2888 err = rocker_flow_tbl_term_mac(rocker_port,
2889				       rocker_port->pport, in_pport_mask,
2890				       ctrl->eth_type, ctrl->eth_dst,
2891 ctrl->eth_dst_mask, vlan_id,
2892 vlan_id_mask, ctrl->copy_to_cpu,
2893 flags);
2894
2895 if (err)
2896 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
2897
2898 return err;
2899}
2900
2901static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
2902 struct rocker_ctrl *ctrl, __be16 vlan_id)
2903{
2904 if (ctrl->acl)
2905 return rocker_port_ctrl_vlan_acl(rocker_port, flags,
2906 ctrl, vlan_id);
2907	if (ctrl->bridge)
2908		return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
2909						    ctrl, vlan_id);
2910
2911 if (ctrl->term)
2912 return rocker_port_ctrl_vlan_term(rocker_port, flags,
2913 ctrl, vlan_id);
2914
2915 return -EOPNOTSUPP;
2916}
2917
2918static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
2919 int flags, __be16 vlan_id)
2920{
2921 int err = 0;
2922 int i;
2923
2924 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
2925 if (rocker_port->ctrls[i]) {
2926 err = rocker_port_ctrl_vlan(rocker_port, flags,
2927 &rocker_ctrls[i], vlan_id);
2928 if (err)
2929 return err;
2930 }
2931 }
2932
2933 return err;
2934}
2935
2936static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
2937 struct rocker_ctrl *ctrl)
2938{
2939 u16 vid;
2940 int err = 0;
2941
2942 for (vid = 1; vid < VLAN_N_VID; vid++) {
2943 if (!test_bit(vid, rocker_port->vlan_bitmap))
2944 continue;
2945 err = rocker_port_ctrl_vlan(rocker_port, flags,
2946 ctrl, htons(vid));
2947 if (err)
2948 break;
2949 }
2950
2951 return err;
2952}
2953
2954static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
2955 u16 vid)
2956{
2957 enum rocker_of_dpa_table_id goto_tbl =
2958 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2959	u32 in_pport = rocker_port->pport;
2960	__be16 vlan_id = htons(vid);
2961 __be16 vlan_id_mask = htons(0xffff);
2962 __be16 internal_vlan_id;
2963 bool untagged;
2964 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2965 int err;
2966
2967 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
2968
2969 if (adding && test_and_set_bit(ntohs(internal_vlan_id),
2970 rocker_port->vlan_bitmap))
2971 return 0; /* already added */
2972 else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
2973 rocker_port->vlan_bitmap))
2974 return 0; /* already removed */
2975
2976 if (adding) {
2977 err = rocker_port_ctrl_vlan_add(rocker_port, flags,
2978 internal_vlan_id);
2979 if (err) {
2980 netdev_err(rocker_port->dev,
2981 "Error (%d) port ctrl vlan add\n", err);
2982 return err;
2983 }
2984 }
2985
2986 err = rocker_port_vlan_l2_groups(rocker_port, flags,
2987 internal_vlan_id, untagged);
2988 if (err) {
2989 netdev_err(rocker_port->dev,
2990 "Error (%d) port VLAN l2 groups\n", err);
2991 return err;
2992 }
2993
2994 err = rocker_port_vlan_flood_group(rocker_port, flags,
2995 internal_vlan_id);
2996 if (err) {
2997 netdev_err(rocker_port->dev,
2998 "Error (%d) port VLAN l2 flood group\n", err);
2999 return err;
3000 }
3001
3002 err = rocker_flow_tbl_vlan(rocker_port, flags,
3003				   in_pport, vlan_id, vlan_id_mask,
3004				   goto_tbl, untagged, internal_vlan_id);
3005 if (err)
3006 netdev_err(rocker_port->dev,
3007 "Error (%d) port VLAN table\n", err);
3008
3009 return err;
3010}
3011
3012static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
3013{
3014	enum rocker_of_dpa_table_id goto_tbl;
3015	u32 in_pport;
3016	u32 in_pport_mask;
3017	int err;
3018
3019 /* Normal Ethernet Frames. Matches pkts from any local physical
3020 * ports. Goto VLAN tbl.
3021 */
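	/* An in_pport of 0 with a mask of 0xffff0000 leaves the low 16 bits
	 * of the ingress port wildcarded, which (assuming physical ports are
	 * numbered in that low range) is what lets this single entry match
	 * every local physical port.
	 */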
3022
3023	in_pport = 0;
3024	in_pport_mask = 0xffff0000;
3025	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3026
3027 err = rocker_flow_tbl_ig_port(rocker_port, flags,
3028				      in_pport, in_pport_mask,
3029				      goto_tbl);
3030 if (err)
3031 netdev_err(rocker_port->dev,
3032 "Error (%d) ingress port table entry\n", err);
3033
3034 return err;
3035}
3036
3037struct rocker_fdb_learn_work {
3038 struct work_struct work;
3039 struct net_device *dev;
3040 int flags;
3041 u8 addr[ETH_ALEN];
3042 u16 vid;
3043};
3044
3045static void rocker_port_fdb_learn_work(struct work_struct *work)
3046{
3047 struct rocker_fdb_learn_work *lw =
3048 container_of(work, struct rocker_fdb_learn_work, work);
3049 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3050 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3051	struct netdev_switch_notifier_fdb_info info;
3052
3053	info.addr = lw->addr;
3054	info.vid = lw->vid;
3055
3056	if (learned && removing)
3057		call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
3058					     lw->dev, &info.info);
3059	else if (learned && !removing)
3060		call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
3061					     lw->dev, &info.info);
3062
3063 kfree(work);
3064}
3065
3066static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3067 int flags, const u8 *addr, __be16 vlan_id)
3068{
3069 struct rocker_fdb_learn_work *lw;
3070 enum rocker_of_dpa_table_id goto_tbl =
3071 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3072	u32 out_pport = rocker_port->pport;
3073	u32 tunnel_id = 0;
3074	u32 group_id = ROCKER_GROUP_NONE;
3075	bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3076	bool copy_to_cpu = false;
3077	int err;
3078
3079	if (rocker_port_is_bridged(rocker_port))
3080		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3081
3082 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3083 err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
3084 vlan_id, tunnel_id, goto_tbl,
3085 group_id, copy_to_cpu);
3086 if (err)
3087 return err;
3088 }
3089
3090	if (!syncing)
3091		return 0;
3092
3093	if (!rocker_port_is_bridged(rocker_port))
3094 return 0;
3095
3096 lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
3097 if (!lw)
3098 return -ENOMEM;
3099
3100 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3101
3102 lw->dev = rocker_port->dev;
3103 lw->flags = flags;
3104 ether_addr_copy(lw->addr, addr);
3105 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3106
3107 schedule_work(&lw->work);
3108
3109 return 0;
3110}
3111
3112static struct rocker_fdb_tbl_entry *
3113rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
3114{
3115 struct rocker_fdb_tbl_entry *found;
3116
3117 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3118 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3119 return found;
3120
3121 return NULL;
3122}
3123
3124static int rocker_port_fdb(struct rocker_port *rocker_port,
3125 const unsigned char *addr,
3126 __be16 vlan_id, int flags)
3127{
3128 struct rocker *rocker = rocker_port->rocker;
3129 struct rocker_fdb_tbl_entry *fdb;
3130 struct rocker_fdb_tbl_entry *found;
3131 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3132 unsigned long lock_flags;
3133
3134 fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
3135 if (!fdb)
3136 return -ENOMEM;
3137
3138 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3139	fdb->key.pport = rocker_port->pport;
3140	ether_addr_copy(fdb->key.addr, addr);
3141 fdb->key.vlan_id = vlan_id;
3142 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3143
3144 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3145
3146 found = rocker_fdb_tbl_find(rocker, fdb);
3147
3148 if (removing && found) {
3149 kfree(fdb);
3150 hash_del(&found->entry);
3151 } else if (!removing && !found) {
3152 hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
3153 }
3154
3155 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3156
3157 /* Check if adding and already exists, or removing and can't find */
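	/* In other words: the request did not change the local table, either
	 * because we are removing an entry that is not there (nothing to do)
	 * or adding one that already exists; in the latter case fall through
	 * with ROCKER_OP_FLAG_REFRESH so only the aging state is refreshed.
	 */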
3158 if (!found != !removing) {
3159 kfree(fdb);
3160 if (!found && removing)
3161 return 0;
3162 /* Refreshing existing to update aging timers */
3163 flags |= ROCKER_OP_FLAG_REFRESH;
3164 }
3165
3166 return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
3167}
3168
3169static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
3170{
3171 struct rocker *rocker = rocker_port->rocker;
3172 struct rocker_fdb_tbl_entry *found;
3173 unsigned long lock_flags;
3174 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3175 struct hlist_node *tmp;
3176 int bkt;
3177 int err = 0;
3178
3179 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3180 rocker_port->stp_state == BR_STATE_FORWARDING)
3181 return 0;
3182
3183 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3184
3185 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3186		if (found->key.pport != rocker_port->pport)
3187			continue;
3188 if (!found->learned)
3189 continue;
3190 err = rocker_port_fdb_learn(rocker_port, flags,
3191 found->key.addr,
3192 found->key.vlan_id);
3193 if (err)
3194 goto err_out;
3195 hash_del(&found->entry);
3196 }
3197
3198err_out:
3199 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3200
3201 return err;
3202}
3203
3204static int rocker_port_router_mac(struct rocker_port *rocker_port,
3205				  int flags, __be16 vlan_id)
3206{
3207	u32 in_pport_mask = 0xffffffff;
3208	__be16 eth_type;
3209 const u8 *dst_mac_mask = ff_mac;
3210 __be16 vlan_id_mask = htons(0xffff);
3211 bool copy_to_cpu = false;
3212 int err;
3213
3214 if (ntohs(vlan_id) == 0)
3215 vlan_id = rocker_port->internal_vlan_id;
3216
3217 eth_type = htons(ETH_P_IP);
3218 err = rocker_flow_tbl_term_mac(rocker_port,
3219				       rocker_port->pport, in_pport_mask,
3220				       eth_type, rocker_port->dev->dev_addr,
3221 dst_mac_mask, vlan_id, vlan_id_mask,
3222 copy_to_cpu, flags);
3223 if (err)
3224 return err;
3225
3226 eth_type = htons(ETH_P_IPV6);
3227 err = rocker_flow_tbl_term_mac(rocker_port,
3228				       rocker_port->pport, in_pport_mask,
3229				       eth_type, rocker_port->dev->dev_addr,
3230 dst_mac_mask, vlan_id, vlan_id_mask,
3231 copy_to_cpu, flags);
3232
3233 return err;
3234}
3235
3236static int rocker_port_fwding(struct rocker_port *rocker_port)
3237{
3238	bool pop_vlan;
3239	u32 out_pport;
3240	__be16 vlan_id;
3241 u16 vid;
3242 int flags = ROCKER_OP_FLAG_NOWAIT;
3243 int err;
3244
3245 /* Port will be forwarding-enabled if its STP state is LEARNING
3246 * or FORWARDING. Traffic from CPU can still egress, regardless of
3247 * port STP state. Use L2 interface group on port VLANs as a way
3248 * to toggle port forwarding: if forwarding is disabled, L2
3249 * interface group will not exist.
3250 */
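	/* Concretely: when the port is not in LEARNING/FORWARDING,
	 * ROCKER_OP_FLAG_REMOVE is set below and the per-VLAN L2 interface
	 * groups are deleted, so nothing except CPU-originated traffic can
	 * egress the port until it is re-enabled.
	 */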
3251
3252 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3253 rocker_port->stp_state != BR_STATE_FORWARDING)
3254 flags |= ROCKER_OP_FLAG_REMOVE;
3255
3256	out_pport = rocker_port->pport;
3257	for (vid = 1; vid < VLAN_N_VID; vid++) {
3258 if (!test_bit(vid, rocker_port->vlan_bitmap))
3259 continue;
3260 vlan_id = htons(vid);
3261 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3262 err = rocker_group_l2_interface(rocker_port, flags,
3263						vlan_id, out_pport,
3264						pop_vlan);
3265		if (err) {
3266			netdev_err(rocker_port->dev,
3267				   "Error (%d) port VLAN l2 group for pport %d\n",
3268				   err, out_pport);
3269			return err;
3270 }
3271 }
3272
3273 return 0;
3274}
3275
3276static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
3277{
3278 bool want[ROCKER_CTRL_MAX] = { 0, };
3279 int flags;
3280 int err;
3281 int i;
3282
3283 if (rocker_port->stp_state == state)
3284 return 0;
3285
3286 rocker_port->stp_state = state;
3287
3288 switch (state) {
3289 case BR_STATE_DISABLED:
3290 /* port is completely disabled */
3291 break;
3292 case BR_STATE_LISTENING:
3293 case BR_STATE_BLOCKING:
3294 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3295 break;
3296 case BR_STATE_LEARNING:
3297 case BR_STATE_FORWARDING:
3298 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3299 want[ROCKER_CTRL_IPV4_MCAST] = true;
3300 want[ROCKER_CTRL_IPV6_MCAST] = true;
3301 if (rocker_port_is_bridged(rocker_port))
3302 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3303 else
3304 want[ROCKER_CTRL_LOCAL_ARP] = true;
3305 break;
3306 }
3307
3308 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3309 if (want[i] != rocker_port->ctrls[i]) {
3310 flags = ROCKER_OP_FLAG_NOWAIT |
3311 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3312 err = rocker_port_ctrl(rocker_port, flags,
3313 &rocker_ctrls[i]);
3314 if (err)
3315 return err;
3316 rocker_port->ctrls[i] = want[i];
3317 }
3318 }
3319
3320 err = rocker_port_fdb_flush(rocker_port);
3321 if (err)
3322 return err;
3323
3324 return rocker_port_fwding(rocker_port);
3325}
3326
3327static int rocker_port_fwd_enable(struct rocker_port *rocker_port)
3328{
3329 if (rocker_port_is_bridged(rocker_port))
3330 /* bridge STP will enable port */
3331 return 0;
3332
3333 /* port is not bridged, so simulate going to FORWARDING state */
3334 return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING);
3335}
3336
3337static int rocker_port_fwd_disable(struct rocker_port *rocker_port)
3338{
3339 if (rocker_port_is_bridged(rocker_port))
3340 /* bridge STP will disable port */
3341 return 0;
3342
3343 /* port is not bridged, so simulate going to DISABLED state */
3344 return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
3345}
3346
3347static struct rocker_internal_vlan_tbl_entry *
3348rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
3349{
3350 struct rocker_internal_vlan_tbl_entry *found;
3351
3352 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3353 entry, ifindex) {
3354 if (found->ifindex == ifindex)
3355 return found;
3356 }
3357
3358 return NULL;
3359}
3360
3361static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3362 int ifindex)
3363{
3364 struct rocker *rocker = rocker_port->rocker;
3365 struct rocker_internal_vlan_tbl_entry *entry;
3366 struct rocker_internal_vlan_tbl_entry *found;
3367 unsigned long lock_flags;
3368 int i;
3369
3370 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3371 if (!entry)
3372 return 0;
3373
3374 entry->ifindex = ifindex;
3375
3376 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3377
3378 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3379 if (found) {
3380 kfree(entry);
3381 goto found;
3382 }
3383
3384 found = entry;
3385 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3386
3387 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3388 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3389 continue;
3390 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3391 goto found;
3392 }
3393
3394 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3395
3396found:
3397 found->ref_count++;
3398 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3399
3400 return found->vlan_id;
3401}
3402
3403static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
3404 int ifindex)
3405{
3406 struct rocker *rocker = rocker_port->rocker;
3407 struct rocker_internal_vlan_tbl_entry *found;
3408 unsigned long lock_flags;
3409 unsigned long bit;
3410
3411 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3412
3413 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3414 if (!found) {
3415 netdev_err(rocker_port->dev,
3416 "ifindex (%d) not found in internal VLAN tbl\n",
3417 ifindex);
3418 goto not_found;
3419 }
3420
3421 if (--found->ref_count <= 0) {
3422 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3423 clear_bit(bit, rocker->internal_vlan_bitmap);
3424 hash_del(&found->entry);
3425 kfree(found);
3426 }
3427
3428not_found:
3429 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3430}
3431
3432/*****************
3433 * Net device ops
3434 *****************/
3435
3436static int rocker_port_open(struct net_device *dev)
3437{
3438 struct rocker_port *rocker_port = netdev_priv(dev);
3439 int err;
3440
3441 err = rocker_port_dma_rings_init(rocker_port);
3442 if (err)
3443 return err;
3444
3445 err = request_irq(rocker_msix_tx_vector(rocker_port),
3446 rocker_tx_irq_handler, 0,
3447 rocker_driver_name, rocker_port);
3448 if (err) {
3449 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
3450 goto err_request_tx_irq;
3451 }
3452
3453 err = request_irq(rocker_msix_rx_vector(rocker_port),
3454 rocker_rx_irq_handler, 0,
3455 rocker_driver_name, rocker_port);
3456 if (err) {
3457 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
3458 goto err_request_rx_irq;
3459 }
3460
3461	err = rocker_port_fwd_enable(rocker_port);
3462	if (err)
3463		goto err_fwd_enable;
3464
3465	napi_enable(&rocker_port->napi_tx);
3466 napi_enable(&rocker_port->napi_rx);
3467 rocker_port_set_enable(rocker_port, true);
3468 netif_start_queue(dev);
3469 return 0;
3470
Scott Feldmane47172a2015-02-25 20:15:38 -08003471err_fwd_enable:
Scott Feldman6c707942014-11-28 14:34:28 +01003472 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003473err_request_rx_irq:
3474 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3475err_request_tx_irq:
3476 rocker_port_dma_rings_fini(rocker_port);
3477 return err;
3478}
3479
3480static int rocker_port_stop(struct net_device *dev)
3481{
3482 struct rocker_port *rocker_port = netdev_priv(dev);
3483
3484 netif_stop_queue(dev);
3485 rocker_port_set_enable(rocker_port, false);
3486 napi_disable(&rocker_port->napi_rx);
3487 napi_disable(&rocker_port->napi_tx);
Scott Feldmane47172a2015-02-25 20:15:38 -08003488 rocker_port_fwd_disable(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003489 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3490 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3491 rocker_port_dma_rings_fini(rocker_port);
3492
3493 return 0;
3494}
3495
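/* Walk the ROCKER_TLV_TX_FRAG attributes nested in a completed tx descriptor
 * and undo the streaming DMA mapping that was set up for each fragment.
 */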
3496static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
3497 struct rocker_desc_info *desc_info)
3498{
3499 struct rocker *rocker = rocker_port->rocker;
3500 struct pci_dev *pdev = rocker->pdev;
3501 struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
3502 struct rocker_tlv *attr;
3503 int rem;
3504
3505 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
3506 if (!attrs[ROCKER_TLV_TX_FRAGS])
3507 return;
3508 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
3509 struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
3510 dma_addr_t dma_handle;
3511 size_t len;
3512
3513 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
3514 continue;
3515 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
3516 attr);
3517 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
3518 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
3519 continue;
3520 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
3521 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
3522 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
3523 }
3524}
3525
3526static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
3527 struct rocker_desc_info *desc_info,
3528 char *buf, size_t buf_len)
3529{
3530 struct rocker *rocker = rocker_port->rocker;
3531 struct pci_dev *pdev = rocker->pdev;
3532 dma_addr_t dma_handle;
3533 struct rocker_tlv *frag;
3534
3535 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
3536 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
3537 if (net_ratelimit())
3538 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
3539 return -EIO;
3540 }
3541 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
3542 if (!frag)
3543 goto unmap_frag;
3544 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
3545 dma_handle))
3546 goto nest_cancel;
3547 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
3548 buf_len))
3549 goto nest_cancel;
3550 rocker_tlv_nest_end(desc_info, frag);
3551 return 0;
3552
3553nest_cancel:
3554 rocker_tlv_nest_cancel(desc_info, frag);
3555unmap_frag:
3556 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
3557 return -EMSGSIZE;
3558}
3559
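/* ndo_start_xmit: each skb is described to the device as a nest of
 * ROCKER_TLV_TX_FRAG attributes (DMA address + length per fragment), one for
 * the linear head and one per page fragment. The descriptor is then posted
 * to the tx ring, and the queue is stopped once no free descriptor remains.
 */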
3560static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
3561{
3562 struct rocker_port *rocker_port = netdev_priv(dev);
3563 struct rocker *rocker = rocker_port->rocker;
3564 struct rocker_desc_info *desc_info;
3565 struct rocker_tlv *frags;
3566 int i;
3567 int err;
3568
3569 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3570 if (unlikely(!desc_info)) {
3571 if (net_ratelimit())
3572 netdev_err(dev, "tx ring full when queue awake\n");
3573 return NETDEV_TX_BUSY;
3574 }
3575
3576 rocker_desc_cookie_ptr_set(desc_info, skb);
3577
3578 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
3579 if (!frags)
3580 goto out;
3581 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3582 skb->data, skb_headlen(skb));
3583 if (err)
3584 goto nest_cancel;
3585 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
3586 goto nest_cancel;
3587
3588 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3589 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3590
3591 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3592 skb_frag_address(frag),
3593 skb_frag_size(frag));
3594 if (err)
3595 goto unmap_frags;
3596 }
3597 rocker_tlv_nest_end(desc_info, frags);
3598
3599 rocker_desc_gen_clear(desc_info);
3600 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
3601
3602 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3603 if (!desc_info)
3604 netif_stop_queue(dev);
3605
3606 return NETDEV_TX_OK;
3607
3608unmap_frags:
3609 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
3610nest_cancel:
3611 rocker_tlv_nest_cancel(desc_info, frags);
3612out:
3613 dev_kfree_skb(skb);
David Ahernf2bbca52015-01-16 14:22:29 -07003614 dev->stats.tx_dropped++;
3615
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003616 return NETDEV_TX_OK;
3617}
3618
3619static int rocker_port_set_mac_address(struct net_device *dev, void *p)
3620{
3621 struct sockaddr *addr = p;
3622 struct rocker_port *rocker_port = netdev_priv(dev);
3623 int err;
3624
3625 if (!is_valid_ether_addr(addr->sa_data))
3626 return -EADDRNOTAVAIL;
3627
3628 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
3629 if (err)
3630 return err;
3631 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3632 return 0;
3633}
3634
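/* 8021q VLAN filter hooks: adding a VID installs the VLAN translation for
 * this port and a router MAC entry for that VLAN; killing a VID removes them
 * in the opposite order.
 */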
Scott Feldman6c707942014-11-28 14:34:28 +01003635static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
3636 __be16 proto, u16 vid)
3637{
3638 struct rocker_port *rocker_port = netdev_priv(dev);
3639 int err;
3640
3641 err = rocker_port_vlan(rocker_port, 0, vid);
3642 if (err)
3643 return err;
3644
3645 return rocker_port_router_mac(rocker_port, 0, htons(vid));
3646}
3647
3648static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
3649 __be16 proto, u16 vid)
3650{
3651 struct rocker_port *rocker_port = netdev_priv(dev);
3652 int err;
3653
3654 err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
3655 htons(vid));
3656 if (err)
3657 return err;
3658
3659 return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
3660}
3661
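/* ndo_fdb_add/ndo_fdb_del: only valid while the port is enslaved to a
 * bridge; the MAC is programmed against the VLAN derived from @vid via
 * rocker_port_vid_to_vlan(). Hypothetical iproute2 usage (port name made up
 * for illustration):
 *
 *   bridge fdb add 52:54:00:12:34:56 dev sw1p1 vlan 10 self
 */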
3662static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
3663 struct net_device *dev,
3664 const unsigned char *addr, u16 vid,
3665 u16 nlm_flags)
3666{
3667 struct rocker_port *rocker_port = netdev_priv(dev);
3668 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
3669 int flags = 0;
3670
3671 if (!rocker_port_is_bridged(rocker_port))
3672 return -EINVAL;
3673
3674 return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
3675}
3676
3677static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
3678 struct net_device *dev,
3679 const unsigned char *addr, u16 vid)
3680{
3681 struct rocker_port *rocker_port = netdev_priv(dev);
3682 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
3683 int flags = ROCKER_OP_FLAG_REMOVE;
3684
3685 if (!rocker_port_is_bridged(rocker_port))
3686 return -EINVAL;
3687
3688 return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
3689}
3690
Jiri Pirkoce76ca62014-11-28 14:34:29 +01003691static int rocker_fdb_fill_info(struct sk_buff *skb,
3692 struct rocker_port *rocker_port,
3693 const unsigned char *addr, u16 vid,
3694 u32 portid, u32 seq, int type,
3695 unsigned int flags)
3696{
3697 struct nlmsghdr *nlh;
3698 struct ndmsg *ndm;
3699
3700 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
3701 if (!nlh)
3702 return -EMSGSIZE;
3703
3704 ndm = nlmsg_data(nlh);
3705 ndm->ndm_family = AF_BRIDGE;
3706 ndm->ndm_pad1 = 0;
3707 ndm->ndm_pad2 = 0;
3708 ndm->ndm_flags = NTF_SELF;
3709 ndm->ndm_type = 0;
3710 ndm->ndm_ifindex = rocker_port->dev->ifindex;
3711 ndm->ndm_state = NUD_REACHABLE;
3712
3713 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
3714 goto nla_put_failure;
3715
3716 if (vid && nla_put_u16(skb, NDA_VLAN, vid))
3717 goto nla_put_failure;
3718
Johannes Berg053c0952015-01-16 22:09:00 +01003719 nlmsg_end(skb, nlh);
3720 return 0;
Jiri Pirkoce76ca62014-11-28 14:34:29 +01003721
3722nla_put_failure:
3723 nlmsg_cancel(skb, nlh);
3724 return -EMSGSIZE;
3725}
3726
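/* ndo_fdb_dump: walk the software FDB copy under fdb_tbl_lock and emit one
 * RTM_NEWNEIGH record per entry learned on this port, resuming from
 * cb->args[0] on multi-part dumps.
 */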
3727static int rocker_port_fdb_dump(struct sk_buff *skb,
3728 struct netlink_callback *cb,
3729 struct net_device *dev,
3730 struct net_device *filter_dev,
3731 int idx)
3732{
3733 struct rocker_port *rocker_port = netdev_priv(dev);
3734 struct rocker *rocker = rocker_port->rocker;
3735 struct rocker_fdb_tbl_entry *found;
3736 struct hlist_node *tmp;
3737 int bkt;
3738 unsigned long lock_flags;
3739 const unsigned char *addr;
3740 u16 vid;
3741 int err;
3742
3743 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3744 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003745 if (found->key.pport != rocker_port->pport)
Jiri Pirkoce76ca62014-11-28 14:34:29 +01003746 continue;
3747 if (idx < cb->args[0])
3748 goto skip;
3749 addr = found->key.addr;
3750 vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
3751 err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
3752 NETLINK_CB(cb->skb).portid,
3753 cb->nlh->nlmsg_seq,
3754 RTM_NEWNEIGH, NLM_F_MULTI);
3755 if (err < 0)
3756 break;
3757skip:
3758 ++idx;
3759 }
3760 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3761 return idx;
3762}
3763
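/* ndo_bridge_setlink: only the IFLA_BRPORT_LEARNING and
 * IFLA_BRPORT_LEARNING_SYNC flags nested under IFLA_PROTINFO are honoured;
 * LEARNING is pushed to the hardware via rocker_port_set_learning(), while
 * LEARNING_SYNC only updates brport_flags. Hypothetical iproute2 usage (port
 * name made up for illustration):
 *
 *   bridge link set dev sw1p1 learning on learning_sync on self
 */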
Scott Feldman5111f802014-11-28 14:34:30 +01003764static int rocker_port_bridge_setlink(struct net_device *dev,
Roopa Prabhuadd511b2015-01-29 22:40:12 -08003765 struct nlmsghdr *nlh, u16 flags)
Scott Feldman5111f802014-11-28 14:34:30 +01003766{
3767 struct rocker_port *rocker_port = netdev_priv(dev);
3768 struct nlattr *protinfo;
Scott Feldman5111f802014-11-28 14:34:30 +01003769 struct nlattr *attr;
Scott Feldman5111f802014-11-28 14:34:30 +01003770 int err;
3771
3772 protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
3773 IFLA_PROTINFO);
Scott Feldman5111f802014-11-28 14:34:30 +01003774 if (protinfo) {
3775 attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
3776 if (attr) {
Thomas Grafe7560582014-11-28 14:34:31 +01003777 if (nla_len(attr) < sizeof(u8))
3778 return -EINVAL;
3779
Scott Feldman5111f802014-11-28 14:34:30 +01003780 if (nla_get_u8(attr))
3781 rocker_port->brport_flags |= BR_LEARNING;
3782 else
3783 rocker_port->brport_flags &= ~BR_LEARNING;
3784 err = rocker_port_set_learning(rocker_port);
3785 if (err)
3786 return err;
3787 }
3788 attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
3789 if (attr) {
Thomas Grafe7560582014-11-28 14:34:31 +01003790 if (nla_len(attr) < sizeof(u8))
3791 return -EINVAL;
3792
Scott Feldman5111f802014-11-28 14:34:30 +01003793 if (nla_get_u8(attr))
3794 rocker_port->brport_flags |= BR_LEARNING_SYNC;
3795 else
3796 rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
3797 }
3798 }
3799
3800 return 0;
3801}
3802
3803static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3804 struct net_device *dev,
3805 u32 filter_mask)
3806{
3807 struct rocker_port *rocker_port = netdev_priv(dev);
Roopa Prabhu1d460b92014-12-08 14:04:20 -08003808 u16 mode = BRIDGE_MODE_UNDEF;
Scott Feldman5111f802014-11-28 14:34:30 +01003809 u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
3810
3811 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
3812 rocker_port->brport_flags, mask);
3813}
3814
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003815static int rocker_port_switch_parent_id_get(struct net_device *dev,
3816 struct netdev_phys_item_id *psid)
3817{
3818 struct rocker_port *rocker_port = netdev_priv(dev);
3819 struct rocker *rocker = rocker_port->rocker;
3820
3821 psid->id_len = sizeof(rocker->hw.id);
3822 memcpy(&psid->id, &rocker->hw.id, psid->id_len);
3823 return 0;
3824}
3825
Scott Feldman6c707942014-11-28 14:34:28 +01003826static int rocker_port_switch_port_stp_update(struct net_device *dev, u8 state)
3827{
3828 struct rocker_port *rocker_port = netdev_priv(dev);
3829
3830 return rocker_port_stp_update(rocker_port, state);
3831}
3832
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003833static const struct net_device_ops rocker_port_netdev_ops = {
3834 .ndo_open = rocker_port_open,
3835 .ndo_stop = rocker_port_stop,
3836 .ndo_start_xmit = rocker_port_xmit,
3837 .ndo_set_mac_address = rocker_port_set_mac_address,
Scott Feldman6c707942014-11-28 14:34:28 +01003838 .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid,
3839 .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid,
3840 .ndo_fdb_add = rocker_port_fdb_add,
3841 .ndo_fdb_del = rocker_port_fdb_del,
Jiri Pirkoce76ca62014-11-28 14:34:29 +01003842 .ndo_fdb_dump = rocker_port_fdb_dump,
Scott Feldman5111f802014-11-28 14:34:30 +01003843 .ndo_bridge_setlink = rocker_port_bridge_setlink,
3844 .ndo_bridge_getlink = rocker_port_bridge_getlink,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003845 .ndo_switch_parent_id_get = rocker_port_switch_parent_id_get,
Scott Feldman6c707942014-11-28 14:34:28 +01003846 .ndo_switch_port_stp_update = rocker_port_switch_port_stp_update,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01003847};
3848
3849/********************
3850 * ethtool interface
3851 ********************/
3852
3853static int rocker_port_get_settings(struct net_device *dev,
3854 struct ethtool_cmd *ecmd)
3855{
3856 struct rocker_port *rocker_port = netdev_priv(dev);
3857
3858 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
3859}
3860
3861static int rocker_port_set_settings(struct net_device *dev,
3862 struct ethtool_cmd *ecmd)
3863{
3864 struct rocker_port *rocker_port = netdev_priv(dev);
3865
3866 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
3867}
3868
3869static void rocker_port_get_drvinfo(struct net_device *dev,
3870 struct ethtool_drvinfo *drvinfo)
3871{
3872 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
3873 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
3874}
3875
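/* Per-port counters exposed through "ethtool -S": each entry pairs the
 * string reported to userspace with the ROCKER_TLV_CMD_PORT_STATS_*
 * attribute that carries its 64-bit value in the GET_PORT_STATS reply.
 */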
David Ahern9766e972015-01-29 20:59:33 -07003876static struct rocker_port_stats {
3877 char str[ETH_GSTRING_LEN];
3878 int type;
3879} rocker_port_stats[] = {
3880 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
3881 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
3882 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
3883 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
3884
3885 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
3886 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
3887 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
3888 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
3889};
3890
3891#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
3892
3893static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
3894 u8 *data)
3895{
3896 u8 *p = data;
3897 int i;
3898
3899 switch (stringset) {
3900 case ETH_SS_STATS:
3901 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
3902 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
3903 p += ETH_GSTRING_LEN;
3904 }
3905 break;
3906 }
3907}
3908
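/* Build a GET_PORT_STATS command descriptor: the only argument is the
 * physical port (pport), nested under ROCKER_TLV_CMD_INFO.
 */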
3909static int
3910rocker_cmd_get_port_stats_prep(struct rocker *rocker,
3911 struct rocker_port *rocker_port,
3912 struct rocker_desc_info *desc_info,
3913 void *priv)
3914{
3915 struct rocker_tlv *cmd_stats;
3916
3917 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
3918 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
3919 return -EMSGSIZE;
3920
3921 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
3922 if (!cmd_stats)
3923 return -EMSGSIZE;
3924
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003925 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
3926 rocker_port->pport))
David Ahern9766e972015-01-29 20:59:33 -07003927 return -EMSGSIZE;
3928
3929 rocker_tlv_nest_end(desc_info, cmd_stats);
3930
3931 return 0;
3932}
3933
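/* Completion handler for GET_PORT_STATS: verify the reply is for this pport
 * and copy each counter present in the reply into the caller's u64 array;
 * attributes missing from the reply leave the corresponding slot untouched.
 */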
3934static int
3935rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
3936 struct rocker_port *rocker_port,
3937 struct rocker_desc_info *desc_info,
3938 void *priv)
3939{
3940 struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
3941 struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
3942 struct rocker_tlv *pattr;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003943 u32 pport;
David Ahern9766e972015-01-29 20:59:33 -07003944 u64 *data = priv;
3945 int i;
3946
3947 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
3948
3949 if (!attrs[ROCKER_TLV_CMD_INFO])
3950 return -EIO;
3951
3952 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
3953 attrs[ROCKER_TLV_CMD_INFO]);
3954
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003955 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
David Ahern9766e972015-01-29 20:59:33 -07003956 return -EIO;
3957
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003958 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
3959 if (pport != rocker_port->pport)
David Ahern9766e972015-01-29 20:59:33 -07003960 return -EIO;
3961
3962 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
3963 pattr = stats_attrs[rocker_port_stats[i].type];
3964 if (!pattr)
3965 continue;
3966
3967 data[i] = rocker_tlv_get_u64(pattr);
3968 }
3969
3970 return 0;
3971}
3972
3973static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
3974 void *priv)
3975{
3976 return rocker_cmd_exec(rocker_port->rocker, rocker_port,
3977 rocker_cmd_get_port_stats_prep, NULL,
3978 rocker_cmd_get_port_stats_ethtool_proc,
3979 priv, false);
3980}
3981
3982static void rocker_port_get_stats(struct net_device *dev,
3983 struct ethtool_stats *stats, u64 *data)
3984{
3985 struct rocker_port *rocker_port = netdev_priv(dev);
3986
3987 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
3988 int i;
3989
3990 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
3991 data[i] = 0;
3992 }
3995}
3996
3997static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
3998{
3999 switch (sset) {
4000 case ETH_SS_STATS:
4001 return ROCKER_PORT_STATS_LEN;
4002 default:
4003 return -EOPNOTSUPP;
4004 }
4005}
4006
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004007static const struct ethtool_ops rocker_port_ethtool_ops = {
4008 .get_settings = rocker_port_get_settings,
4009 .set_settings = rocker_port_set_settings,
4010 .get_drvinfo = rocker_port_get_drvinfo,
4011 .get_link = ethtool_op_get_link,
David Ahern9766e972015-01-29 20:59:33 -07004012 .get_strings = rocker_port_get_strings,
4013 .get_ethtool_stats = rocker_port_get_stats,
4014 .get_sset_count = rocker_port_get_sset_count,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004015};
4016
4017/*****************
4018 * NAPI interface
4019 *****************/
4020
4021static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4022{
4023 return container_of(napi, struct rocker_port, napi_tx);
4024}
4025
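/* tx NAPI poll: reclaim completed tx descriptors, unmap their fragments,
 * account tx stats, free the skbs and return the consumed credits to the
 * ring; the queue is woken if it had been stopped for lack of descriptors.
 */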
4026static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4027{
4028 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
4029 struct rocker *rocker = rocker_port->rocker;
4030 struct rocker_desc_info *desc_info;
4031 u32 credits = 0;
4032 int err;
4033
4034 /* Clean up tx descriptors */
4035 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
David Ahernf2bbca52015-01-16 14:22:29 -07004036 struct sk_buff *skb;
4037
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004038 err = rocker_desc_err(desc_info);
4039 if (err && net_ratelimit())
4040 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4041 err);
4042 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
David Ahernf2bbca52015-01-16 14:22:29 -07004043
4044 skb = rocker_desc_cookie_ptr_get(desc_info);
4045 if (err == 0) {
4046 rocker_port->dev->stats.tx_packets++;
4047 rocker_port->dev->stats.tx_bytes += skb->len;
4048 } else {
4049 rocker_port->dev->stats.tx_errors++;
 }
4050
4051 dev_kfree_skb_any(skb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004052 credits++;
4053 }
4054
4055 if (credits && netif_queue_stopped(rocker_port->dev))
4056 netif_wake_queue(rocker_port->dev);
4057
4058 napi_complete(napi);
4059 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4060
4061 return 0;
4062}
4063
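/* Hand one received frame to the stack: unmap the DMA buffer, size the skb
 * to the frame length reported in ROCKER_TLV_RX_FRAG_LEN, account rx stats
 * and replenish the descriptor with a freshly mapped skb.
 */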
4064static int rocker_port_rx_proc(struct rocker *rocker,
4065 struct rocker_port *rocker_port,
4066 struct rocker_desc_info *desc_info)
4067{
4068 struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
4069 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4070 size_t rx_len;
4071
4072 if (!skb)
4073 return -ENOENT;
4074
4075 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4076 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4077 return -EINVAL;
4078
4079 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4080
4081 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4082 skb_put(skb, rx_len);
4083 skb->protocol = eth_type_trans(skb, rocker_port->dev);
David Ahernf2bbca52015-01-16 14:22:29 -07004084
4085 rocker_port->dev->stats.rx_packets++;
4086 rocker_port->dev->stats.rx_bytes += skb->len;
4087
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004088 netif_receive_skb(skb);
4089
4090 return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
4091}
4092
4093static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4094{
4095 return container_of(napi, struct rocker_port, napi_rx);
4096}
4097
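/* rx NAPI poll: process up to @budget completed rx descriptors, re-arm each
 * one, and complete NAPI early when the ring drains before the budget is
 * spent.
 */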
4098static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4099{
4100 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
4101 struct rocker *rocker = rocker_port->rocker;
4102 struct rocker_desc_info *desc_info;
4103 u32 credits = 0;
4104 int err;
4105
4106 /* Process rx descriptors */
4107 while (credits < budget &&
4108 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4109 err = rocker_desc_err(desc_info);
4110 if (err) {
4111 if (net_ratelimit())
4112 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4113 err);
4114 } else {
4115 err = rocker_port_rx_proc(rocker, rocker_port,
4116 desc_info);
4117 if (err && net_ratelimit())
4118 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4119 err);
4120 }
David Ahernf2bbca52015-01-16 14:22:29 -07004121 if (err)
4122 rocker_port->dev->stats.rx_errors++;
4123
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004124 rocker_desc_gen_clear(desc_info);
4125 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4126 credits++;
4127 }
4128
4129 if (credits < budget)
4130 napi_complete(napi);
4131
4132 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
4133
4134 return credits;
4135}
4136
4137/*****************
4138 * PCI driver ops
4139 *****************/
4140
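/* Seed the initial carrier state from the PORT_PHYS_LINK_STATUS register,
 * which exposes one link bit per physical port.
 */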
4141static void rocker_carrier_init(struct rocker_port *rocker_port)
4142{
4143 struct rocker *rocker = rocker_port->rocker;
4144 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4145 bool link_up;
4146
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004147 link_up = link_status & (1ULL << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004148 if (link_up)
4149 netif_carrier_on(rocker_port->dev);
4150 else
4151 netif_carrier_off(rocker_port->dev);
4152}
4153
4154static void rocker_remove_ports(struct rocker *rocker)
4155{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004156 struct rocker_port *rocker_port;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004157 int i;
4158
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004159 for (i = 0; i < rocker->port_count; i++) {
4160 rocker_port = rocker->ports[i];
 if (!rocker_port)
 continue;
4161 rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
4162 unregister_netdev(rocker_port->dev);
4163 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004164 kfree(rocker->ports);
4165}
4166
4167static void rocker_port_dev_addr_init(struct rocker *rocker,
4168 struct rocker_port *rocker_port)
4169{
4170 struct pci_dev *pdev = rocker->pdev;
4171 int err;
4172
4173 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4174 rocker_port->dev->dev_addr);
4175 if (err) {
4176 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4177 eth_hw_addr_random(rocker_port->dev);
4178 }
4179}
4180
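/* Create the net_device for one front-panel port: physical port numbers
 * (pport) are 1-based while the ports[] array index is 0-based. The netdev
 * is registered first, then learning, the port's internal VLAN and its
 * ingress port table entry are set up.
 */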
4181static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4182{
4183 struct pci_dev *pdev = rocker->pdev;
4184 struct rocker_port *rocker_port;
4185 struct net_device *dev;
4186 int err;
4187
4188 dev = alloc_etherdev(sizeof(struct rocker_port));
4189 if (!dev)
4190 return -ENOMEM;
4191 rocker_port = netdev_priv(dev);
4192 rocker_port->dev = dev;
4193 rocker_port->rocker = rocker;
4194 rocker_port->port_number = port_number;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004195 rocker_port->pport = port_number + 1;
Scott Feldman5111f802014-11-28 14:34:30 +01004196 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004197
4198 rocker_port_dev_addr_init(rocker, rocker_port);
4199 dev->netdev_ops = &rocker_port_netdev_ops;
4200 dev->ethtool_ops = &rocker_port_ethtool_ops;
4201 netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
4202 NAPI_POLL_WEIGHT);
4203 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
4204 NAPI_POLL_WEIGHT);
4205 rocker_carrier_init(rocker_port);
4206
Roopa Prabhueb0ac422015-01-29 22:40:15 -08004207 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4208 NETIF_F_HW_SWITCH_OFFLOAD;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004209
4210 err = register_netdev(dev);
4211 if (err) {
4212 dev_err(&pdev->dev, "register_netdev failed\n");
4213 goto err_register_netdev;
4214 }
4215 rocker->ports[port_number] = rocker_port;
4216
Scott Feldman5111f802014-11-28 14:34:30 +01004217 rocker_port_set_learning(rocker_port);
4218
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004219 rocker_port->internal_vlan_id =
4220 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
4221 err = rocker_port_ig_tbl(rocker_port, 0);
4222 if (err) {
4223 dev_err(&pdev->dev, "install ig port table failed\n");
4224 goto err_port_ig_tbl;
4225 }
4226
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004227 return 0;
4228
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004229err_port_ig_tbl:
 rocker->ports[port_number] = NULL;
4230 unregister_netdev(dev);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004231err_register_netdev:
4232 free_netdev(dev);
4233 return err;
4234}
4235
4236static int rocker_probe_ports(struct rocker *rocker)
4237{
4238 int i;
4239 size_t alloc_size;
4240 int err;
4241
4242 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
 /* Zero the array so rocker_remove_ports() can skip slots that were
 * never successfully probed.
 */
4243 rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
Dan Carpentere65ad3b2015-02-25 16:35:32 +03004244 if (!rocker->ports)
4245 return -ENOMEM;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004246 for (i = 0; i < rocker->port_count; i++) {
4247 err = rocker_probe_port(rocker, i);
4248 if (err)
4249 goto remove_ports;
4250 }
4251 return 0;
4252
4253remove_ports:
4254 rocker_remove_ports(rocker);
4255 return err;
4256}
4257
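/* Allocate the exact MSI-X vector set the device expects:
 * ROCKER_MSIX_VEC_COUNT(port_count) covers the command and event vectors
 * plus one tx and one rx vector per port.
 */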
4258static int rocker_msix_init(struct rocker *rocker)
4259{
4260 struct pci_dev *pdev = rocker->pdev;
4261 int msix_entries;
4262 int i;
4263 int err;
4264
4265 msix_entries = pci_msix_vec_count(pdev);
4266 if (msix_entries < 0)
4267 return msix_entries;
4268
4269 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
4270 return -EINVAL;
4271
4272 rocker->msix_entries = kmalloc_array(msix_entries,
4273 sizeof(struct msix_entry),
4274 GFP_KERNEL);
4275 if (!rocker->msix_entries)
4276 return -ENOMEM;
4277
4278 for (i = 0; i < msix_entries; i++)
4279 rocker->msix_entries[i].entry = i;
4280
4281 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
4282 if (err < 0)
4283 goto err_enable_msix;
4284
4285 return 0;
4286
4287err_enable_msix:
4288 kfree(rocker->msix_entries);
4289 return err;
4290}
4291
4292static void rocker_msix_fini(struct rocker *rocker)
4293{
4294 pci_disable_msix(rocker->pdev);
4295 kfree(rocker->msix_entries);
4296}
4297
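/* PCI probe: map BAR0, read the port count, set up MSI-X, run the basic
 * hardware self-test, reset the device, bring up the cmd/event rings and
 * IRQs, initialise the OF-DPA tables and finally create one netdev per
 * front-panel port.
 */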
4298static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4299{
4300 struct rocker *rocker;
4301 int err;
4302
4303 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
4304 if (!rocker)
4305 return -ENOMEM;
4306
4307 err = pci_enable_device(pdev);
4308 if (err) {
4309 dev_err(&pdev->dev, "pci_enable_device failed\n");
4310 goto err_pci_enable_device;
4311 }
4312
4313 err = pci_request_regions(pdev, rocker_driver_name);
4314 if (err) {
4315 dev_err(&pdev->dev, "pci_request_regions failed\n");
4316 goto err_pci_request_regions;
4317 }
4318
4319 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4320 if (!err) {
4321 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4322 if (err) {
4323 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
4324 goto err_pci_set_dma_mask;
4325 }
4326 } else {
4327 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4328 if (err) {
4329 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
4330 goto err_pci_set_dma_mask;
4331 }
4332 }
4333
4334 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
4335 dev_err(&pdev->dev, "invalid PCI region size\n");
4336 goto err_pci_resource_len_check;
4337 }
4338
4339 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
4340 pci_resource_len(pdev, 0));
4341 if (!rocker->hw_addr) {
4342 dev_err(&pdev->dev, "ioremap failed\n");
4343 err = -EIO;
4344 goto err_ioremap;
4345 }
4346 pci_set_master(pdev);
4347
4348 rocker->pdev = pdev;
4349 pci_set_drvdata(pdev, rocker);
4350
4351 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
4352
4353 err = rocker_msix_init(rocker);
4354 if (err) {
4355 dev_err(&pdev->dev, "MSI-X init failed\n");
4356 goto err_msix_init;
4357 }
4358
4359 err = rocker_basic_hw_test(rocker);
4360 if (err) {
4361 dev_err(&pdev->dev, "basic hw test failed\n");
4362 goto err_basic_hw_test;
4363 }
4364
4365 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
4366
4367 err = rocker_dma_rings_init(rocker);
4368 if (err)
4369 goto err_dma_rings_init;
4370
4371 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
4372 rocker_cmd_irq_handler, 0,
4373 rocker_driver_name, rocker);
4374 if (err) {
4375 dev_err(&pdev->dev, "cannot assign cmd irq\n");
4376 goto err_request_cmd_irq;
4377 }
4378
4379 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
4380 rocker_event_irq_handler, 0,
4381 rocker_driver_name, rocker);
4382 if (err) {
4383 dev_err(&pdev->dev, "cannot assign event irq\n");
4384 goto err_request_event_irq;
4385 }
4386
4387 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
4388
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004389 err = rocker_init_tbls(rocker);
4390 if (err) {
4391 dev_err(&pdev->dev, "cannot init rocker tables\n");
4392 goto err_init_tbls;
4393 }
4394
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004395 err = rocker_probe_ports(rocker);
4396 if (err) {
4397 dev_err(&pdev->dev, "failed to probe ports\n");
4398 goto err_probe_ports;
4399 }
4400
4401 dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
4402
4403 return 0;
4404
4405err_probe_ports:
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004406 rocker_free_tbls(rocker);
4407err_init_tbls:
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004408 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
4409err_request_event_irq:
4410 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
4411err_request_cmd_irq:
4412 rocker_dma_rings_fini(rocker);
4413err_dma_rings_init:
4414err_basic_hw_test:
4415 rocker_msix_fini(rocker);
4416err_msix_init:
4417 iounmap(rocker->hw_addr);
4418err_ioremap:
4419err_pci_resource_len_check:
4420err_pci_set_dma_mask:
4421 pci_release_regions(pdev);
4422err_pci_request_regions:
4423 pci_disable_device(pdev);
4424err_pci_enable_device:
4425 kfree(rocker);
4426 return err;
4427}
4428
4429static void rocker_remove(struct pci_dev *pdev)
4430{
4431 struct rocker *rocker = pci_get_drvdata(pdev);
4432
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004433 rocker_free_tbls(rocker);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004434 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
4435 rocker_remove_ports(rocker);
4436 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
4437 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
4438 rocker_dma_rings_fini(rocker);
4439 rocker_msix_fini(rocker);
4440 iounmap(rocker->hw_addr);
4441 pci_release_regions(rocker->pdev);
4442 pci_disable_device(rocker->pdev);
4443 kfree(rocker);
4444}
4445
4446static struct pci_driver rocker_pci_driver = {
4447 .name = rocker_driver_name,
4448 .id_table = rocker_pci_id_table,
4449 .probe = rocker_probe,
4450 .remove = rocker_remove,
4451};
4452
Scott Feldman6c707942014-11-28 14:34:28 +01004453/************************************
4454 * Net device notifier event handler
4455 ************************************/
4456
4457static bool rocker_port_dev_check(struct net_device *dev)
4458{
4459 return dev->netdev_ops == &rocker_port_netdev_ops;
4460}
4461
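/* When the port is enslaved to a bridge, its untagged traffic is moved from
 * the port's own internal VLAN to one shared by all ports of that bridge
 * (keyed by the bridge's ifindex); leaving restores the port's own internal
 * VLAN and re-enables forwarding if the port is up.
 */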
4462static int rocker_port_bridge_join(struct rocker_port *rocker_port,
4463 struct net_device *bridge)
4464{
4465 int err;
4466
4467 rocker_port_internal_vlan_id_put(rocker_port,
4468 rocker_port->dev->ifindex);
4469
4470 rocker_port->bridge_dev = bridge;
4471
4472 /* Use bridge internal VLAN ID for untagged pkts */
4473 err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
4474 if (err)
4475 return err;
4476 rocker_port->internal_vlan_id =
4477 rocker_port_internal_vlan_id_get(rocker_port,
4478 bridge->ifindex);
Scott Feldmane47172a2015-02-25 20:15:38 -08004479 return rocker_port_vlan(rocker_port, 0, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01004480}
4481
4482static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
4483{
4484 int err;
4485
4486 rocker_port_internal_vlan_id_put(rocker_port,
4487 rocker_port->bridge_dev->ifindex);
4488
4489 rocker_port->bridge_dev = NULL;
4490
4491 /* Use port internal VLAN ID for untagged pkts */
4492 err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
4493 if (err)
4494 return err;
4495 rocker_port->internal_vlan_id =
4496 rocker_port_internal_vlan_id_get(rocker_port,
4497 rocker_port->dev->ifindex);
4498 err = rocker_port_vlan(rocker_port, 0, 0);
Scott Feldmane47172a2015-02-25 20:15:38 -08004499 if (err)
4500 return err;
4501
4502 if (rocker_port->dev->flags & IFF_UP)
4503 err = rocker_port_fwd_enable(rocker_port);
Scott Feldman6c707942014-11-28 14:34:28 +01004504
4505 return err;
4506}
4507
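/* NETDEV_CHANGEUPPER handler: joining is only recognised for upper devices
 * whose rtnl_link_ops kind is "bridge"; any other master change is treated
 * as leaving the bridge.
 */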
4508static int rocker_port_master_changed(struct net_device *dev)
4509{
4510 struct rocker_port *rocker_port = netdev_priv(dev);
4511 struct net_device *master = netdev_master_upper_dev_get(dev);
4512 int err = 0;
4513
4514 if (master && master->rtnl_link_ops &&
4515 !strcmp(master->rtnl_link_ops->kind, "bridge"))
4516 err = rocker_port_bridge_join(rocker_port, master);
4517 else
4518 err = rocker_port_bridge_leave(rocker_port);
4519
4520 return err;
4521}
4522
4523static int rocker_netdevice_event(struct notifier_block *unused,
4524 unsigned long event, void *ptr)
4525{
4526 struct net_device *dev;
4527 int err;
4528
4529 switch (event) {
4530 case NETDEV_CHANGEUPPER:
4531 dev = netdev_notifier_info_to_dev(ptr);
4532 if (!rocker_port_dev_check(dev))
4533 return NOTIFY_DONE;
4534 err = rocker_port_master_changed(dev);
4535 if (err)
4536 netdev_warn(dev,
4537 "failed to reflect master change (err %d)\n",
4538 err);
4539 break;
4540 }
4541
4542 return NOTIFY_DONE;
4543}
4544
4545static struct notifier_block rocker_netdevice_nb __read_mostly = {
4546 .notifier_call = rocker_netdevice_event,
4547};
4548
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004549/***********************
4550 * Module init and exit
4551 ***********************/
4552
4553static int __init rocker_module_init(void)
4554{
Scott Feldman6c707942014-11-28 14:34:28 +01004555 int err;
4556
4557 register_netdevice_notifier(&rocker_netdevice_nb);
4558 err = pci_register_driver(&rocker_pci_driver);
4559 if (err)
4560 goto err_pci_register_driver;
4561 return 0;
4562
4563err_pci_register_driver:
4564 unregister_netdevice_notifier(&rocker_netdevice_nb);
4565 return err;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004566}
4567
4568static void __exit rocker_module_exit(void)
4569{
Scott Feldman6c707942014-11-28 14:34:28 +01004570 unregister_netdevice_notifier(&rocker_netdevice_nb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004571 pci_unregister_driver(&rocker_pci_driver);
4572}
4573
4574module_init(rocker_module_init);
4575module_exit(rocker_module_exit);
4576
4577MODULE_LICENSE("GPL v2");
4578MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
4579MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
4580MODULE_DESCRIPTION("Rocker switch device driver");
4581MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);