/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker_hw.h"
#include "rocker.h"
#include "rocker_tlv.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};

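/* Software image of an OF-DPA flow table entry key.  Each member of the
 * union mirrors the match fields of one OF-DPA pipeline table (ingress
 * port, VLAN, termination MAC, unicast routing, bridging, ACL); tbl_id
 * selects which member is meaningful.
 */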
struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};

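/* The table entries below live in driver-side hash tables; the fields
 * marked "key" (or a CRC32 computed over the key struct) identify an
 * entry on lookup.
 */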
struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};

static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};

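/* Internal VLAN IDs come from a reserved range (ROCKER_INTERNAL_VLAN_ID_BASE
 * through 0xffe) and stand in for untagged traffic, so that every flow can
 * be keyed by some VLAN ID; see rocker_port_vid_to_vlan() below.
 */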
static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_ovs_master(rocker_port->bridge_dev);
}

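/* Flags modifying how an operation is performed: REMOVE turns an add into
 * a delete, NOWAIT issues the command without sleeping for its completion
 * (see rocker_cmd_exec()), LEARNED marks an FDB entry as device-learned,
 * and REFRESH appears to be used to update an existing entry (e.g. the
 * FDB "touched" timestamp) rather than create one.
 */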
#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)

static bool rocker_flags_nowait(int flags)
{
	return flags & ROCKER_OP_FLAG_NOWAIT;
}

static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
				size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction.  If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	return elem ? elem + 1 : NULL;
}

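/* Note that the switchdev_trans_item header sits immediately before the
 * memory handed back to the caller (elem + 1 above); rocker_kfree() steps
 * back over it before calling kfree().
 */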
static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
			    size_t size)
{
	return __rocker_mem_alloc(trans, flags, size);
}

static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
			    size_t n, size_t size)
{
	return __rocker_mem_alloc(trans, flags, n * size);
}

static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}

struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(void)
{
	struct rocker_wait *wait;

	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait)
		return NULL;
	return wait;
}

static void rocker_wait_destroy(struct rocker_wait *wait)
{
	kfree(wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	if (!wait->done)
		return false;
	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

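/* Register accessors: the reg argument is token-pasted onto the ROCKER_
 * prefix, so e.g. rocker_write32(rocker, TEST_REG, v) performs a writel()
 * at hw_addr + ROCKER_TEST_REG.
 */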
#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}

static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
{
	const struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}

/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

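/* Advance a ring position by one with wrap-around, e.g. with limit 4:
 * 0 -> 1 -> 2 -> 3 -> 0.
 */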
static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}

static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *
rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

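/* Get the next free descriptor at head, or NULL if advancing head would
 * collide with tail; one slot is always left unused so a full ring can be
 * told apart from an empty one.
 */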
static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
	static struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

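/* Consume the next completed descriptor at tail.  The device flips the
 * generation bit in comp_err when it is done with a descriptor, so a
 * cleared gen bit means the descriptor is still owned by the hardware.
 */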
static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	static struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

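/* Clamp a requested ring size to a power of two within
 * [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX], e.g. 100 rounds up to 128.
 */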
static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}

static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
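	/* The last descriptor is committed but head is not advanced past
	 * it: rocker_desc_head_get() relies on one slot staying empty to
	 * distinguish a full ring from an empty one.
	 */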
	rocker_desc_commit(&info->desc_info[i]);
}

static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}

static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait;

	wait = rocker_wait_create();
	if (!wait)
		return -ENOMEM;
	rocker_desc_cookie_ptr_set(desc_info, wait);
	return 0;
}

static void
rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info);

	rocker_wait_destroy(wait);
}

static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker)
{
	const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
	int i;
	int err;

	for (i = 0; i < cmd_ring->size; i++) {
		err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
	return err;
}

static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker)
{
	const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
	int i;

	for (i = 0; i < cmd_ring->size; i++)
		rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_cmd_ring_waits_alloc(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring waits\n");
		goto err_dma_cmd_ring_waits_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_waits_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_cmd_ring_waits_free(rocker);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}

static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
					 const struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}

static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}

static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

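/* pport appears to be the 1-based physical port number (the event parsers
 * below subtract 1 to get the port array index), so bit 0 of the enable
 * register is left unused here.
 */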
static void rocker_port_set_enable(const struct rocker_port *rocker_port,
				   bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}

/********************************
 * Interrupt handler and helpers
 ********************************/

static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
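		/* Nobody sleeps on a nowait command, so reclaim the
		 * descriptor here by clearing its gen bit; for waited
		 * commands rocker_cmd_exec() does that after reading
		 * the result.
		 */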
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}

static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}

static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);
static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
					      __be16 vlan_id);

static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;
	int err;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	err = rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
	if (err)
		return err;

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}

static int rocker_event_process(const struct rocker *rocker,
				const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	const struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/

typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv);

typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
				    const struct rocker_desc_info *desc_info,
				    void *priv);

static int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait,
			   rocker_cmd_prep_cb_t prepare, void *prepare_priv,
			   rocker_cmd_proc_cb_t process, void *process_priv)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	unsigned long lock_flags;
	int err;

	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		return -EAGAIN;
	}

	wait = rocker_desc_cookie_ptr_get(desc_info);
	rocker_wait_init(wait);
	wait->nowait = nowait;

	err = prepare(rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		return err;
	}

	rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	if (nowait)
		return 0;

	if (!rocker_wait_event_timeout(wait, HZ / 10))
		return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		return err;

	if (process)
		err = process(rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
	return err;
}

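/* Typical rocker_cmd_exec() usage (a sketch): the prepare callback fills
 * the request TLVs into the command descriptor, and the process callback,
 * if any, parses the completed descriptor, e.g.
 *
 *	err = rocker_cmd_exec(rocker_port, false,
 *			      rocker_cmd_get_port_settings_prep, NULL,
 *			      rocker_cmd_get_port_settings_macaddr_proc,
 *			      macaddr);
 */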
1394static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001395rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001396 struct rocker_desc_info *desc_info,
1397 void *priv)
1398{
1399 struct rocker_tlv *cmd_info;
1400
1401 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1402 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1403 return -EMSGSIZE;
1404 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1405 if (!cmd_info)
1406 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001407 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1408 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001409 return -EMSGSIZE;
1410 rocker_tlv_nest_end(desc_info, cmd_info);
1411 return 0;
1412}
1413
1414static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001415rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001416 const struct rocker_desc_info *desc_info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001417 void *priv)
1418{
1419 struct ethtool_cmd *ecmd = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001420 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1421 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001422 u32 speed;
1423 u8 duplex;
1424 u8 autoneg;
1425
1426 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1427 if (!attrs[ROCKER_TLV_CMD_INFO])
1428 return -EIO;
1429
1430 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1431 attrs[ROCKER_TLV_CMD_INFO]);
1432 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1433 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1434 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1435 return -EIO;
1436
1437 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1438 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1439 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1440
1441 ecmd->transceiver = XCVR_INTERNAL;
1442 ecmd->supported = SUPPORTED_TP;
1443 ecmd->phy_address = 0xff;
1444 ecmd->port = PORT_TP;
1445 ethtool_cmd_speed_set(ecmd, speed);
1446 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1447 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1448
1449 return 0;
1450}
1451
1452static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001453rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001454 const struct rocker_desc_info *desc_info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001455 void *priv)
1456{
1457 unsigned char *macaddr = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001458 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1459 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1460 const struct rocker_tlv *attr;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001461
1462 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1463 if (!attrs[ROCKER_TLV_CMD_INFO])
1464 return -EIO;
1465
1466 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1467 attrs[ROCKER_TLV_CMD_INFO]);
1468 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1469 if (!attr)
1470 return -EIO;
1471
1472 if (rocker_tlv_len(attr) != ETH_ALEN)
1473 return -EINVAL;
1474
1475 ether_addr_copy(macaddr, rocker_tlv_data(attr));
1476 return 0;
1477}
1478
Jiri Pirkoe1ba3de2016-02-16 15:14:43 +01001479static int
1480rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
1481 const struct rocker_desc_info *desc_info,
1482 void *priv)
1483{
1484 u8 *p_mode = priv;
1485 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1486 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1487 const struct rocker_tlv *attr;
1488
1489 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1490 if (!attrs[ROCKER_TLV_CMD_INFO])
1491 return -EIO;
1492
1493 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1494 attrs[ROCKER_TLV_CMD_INFO]);
1495 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
1496 if (!attr)
1497 return -EIO;
1498
1499 *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
1500 return 0;
1501}
1502
David Aherndb191702015-03-17 20:23:16 -06001503struct port_name {
1504 char *buf;
1505 size_t len;
1506};
1507
1508static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001509rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001510 const struct rocker_desc_info *desc_info,
David Aherndb191702015-03-17 20:23:16 -06001511 void *priv)
1512{
Simon Hormane5054642015-05-25 14:28:36 +09001513 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1514 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
David Aherndb191702015-03-17 20:23:16 -06001515 struct port_name *name = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001516 const struct rocker_tlv *attr;
David Aherndb191702015-03-17 20:23:16 -06001517 size_t i, j, len;
Simon Hormane5054642015-05-25 14:28:36 +09001518 const char *str;
David Aherndb191702015-03-17 20:23:16 -06001519
1520 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1521 if (!attrs[ROCKER_TLV_CMD_INFO])
1522 return -EIO;
1523
1524 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1525 attrs[ROCKER_TLV_CMD_INFO]);
1526 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1527 if (!attr)
1528 return -EIO;
1529
 1530	len = min_t(size_t, rocker_tlv_len(attr), name->len - 1); /* keep room for '\0' */
1531 str = rocker_tlv_data(attr);
1532
1533 /* make sure name only contains alphanumeric characters */
1534 for (i = j = 0; i < len; ++i) {
1535 if (isalnum(str[i])) {
1536 name->buf[j] = str[i];
1537 j++;
1538 }
1539 }
1540
1541 if (j == 0)
1542 return -EIO;
1543
1544 name->buf[j] = '\0';
1545
1546 return 0;
1547}
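/* A minimal usage sketch for the proc above (hypothetical caller; the
 * buf/len pairing is the caller's responsibility):
 *
 *	char buf[IFNAMSIZ];
 *	struct port_name name = { .buf = buf, .len = sizeof(buf) };
 *	int err;
 *
 *	err = rocker_cmd_exec(rocker_port, false,
 *			      rocker_cmd_get_port_settings_prep, NULL,
 *			      rocker_cmd_get_port_settings_phys_name_proc,
 *			      &name);
 */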
1548
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001549static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001550rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001551 struct rocker_desc_info *desc_info,
1552 void *priv)
1553{
1554 struct ethtool_cmd *ecmd = priv;
1555 struct rocker_tlv *cmd_info;
1556
1557 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1558 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1559 return -EMSGSIZE;
1560 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1561 if (!cmd_info)
1562 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001563 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1564 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001565 return -EMSGSIZE;
1566 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1567 ethtool_cmd_speed(ecmd)))
1568 return -EMSGSIZE;
1569 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1570 ecmd->duplex))
1571 return -EMSGSIZE;
1572 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1573 ecmd->autoneg))
1574 return -EMSGSIZE;
1575 rocker_tlv_nest_end(desc_info, cmd_info);
1576 return 0;
1577}
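/* The prep callback above serializes a request shaped like this (a
 * sketch of the TLV nesting, not an on-wire byte layout):
 *
 *   ROCKER_TLV_CMD_TYPE (u16) = SET_PORT_SETTINGS
 *   ROCKER_TLV_CMD_INFO (nest)
 *     ROCKER_TLV_CMD_PORT_SETTINGS_PPORT   (u32)
 *     ROCKER_TLV_CMD_PORT_SETTINGS_SPEED   (u32)
 *     ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX  (u8)
 *     ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG (u8)
 */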
1578
1579static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001580rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001581 struct rocker_desc_info *desc_info,
1582 void *priv)
1583{
Simon Hormane5054642015-05-25 14:28:36 +09001584 const unsigned char *macaddr = priv;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001585 struct rocker_tlv *cmd_info;
1586
1587 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1588 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1589 return -EMSGSIZE;
1590 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1591 if (!cmd_info)
1592 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001593 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1594 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001595 return -EMSGSIZE;
1596 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1597 ETH_ALEN, macaddr))
1598 return -EMSGSIZE;
1599 rocker_tlv_nest_end(desc_info, cmd_info);
1600 return 0;
1601}
1602
Scott Feldman5111f802014-11-28 14:34:30 +01001603static int
Scott Feldman77a58c72015-07-08 16:06:47 -07001604rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1605 struct rocker_desc_info *desc_info,
1606 void *priv)
1607{
1608 int mtu = *(int *)priv;
1609 struct rocker_tlv *cmd_info;
1610
1611 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1612 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1613 return -EMSGSIZE;
1614 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1615 if (!cmd_info)
1616 return -EMSGSIZE;
1617 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1618 rocker_port->pport))
1619 return -EMSGSIZE;
1620 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1621 mtu))
1622 return -EMSGSIZE;
1623 rocker_tlv_nest_end(desc_info, cmd_info);
1624 return 0;
1625}
1626
1627static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001628rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
Scott Feldman5111f802014-11-28 14:34:30 +01001629 struct rocker_desc_info *desc_info,
1630 void *priv)
1631{
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001632 bool learning = *(bool *)priv;
Scott Feldman5111f802014-11-28 14:34:30 +01001633 struct rocker_tlv *cmd_info;
1634
1635 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1636 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1637 return -EMSGSIZE;
1638 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1639 if (!cmd_info)
1640 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001641 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1642 rocker_port->pport))
Scott Feldman5111f802014-11-28 14:34:30 +01001643 return -EMSGSIZE;
1644 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001645 learning))
Scott Feldman5111f802014-11-28 14:34:30 +01001646 return -EMSGSIZE;
1647 rocker_tlv_nest_end(desc_info, cmd_info);
1648 return 0;
1649}
1650
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001651static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1652 struct ethtool_cmd *ecmd)
1653{
Jiri Pirko53901cc2016-02-16 15:14:49 +01001654 return rocker_cmd_exec(rocker_port, false,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001655 rocker_cmd_get_port_settings_prep, NULL,
1656 rocker_cmd_get_port_settings_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001657 ecmd);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001658}
1659
1660static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1661 unsigned char *macaddr)
1662{
Jiri Pirko53901cc2016-02-16 15:14:49 +01001663 return rocker_cmd_exec(rocker_port, false,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001664 rocker_cmd_get_port_settings_prep, NULL,
1665 rocker_cmd_get_port_settings_macaddr_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001666 macaddr);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001667}
1668
Jiri Pirkoe1ba3de2016-02-16 15:14:43 +01001669static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
1670 u8 *p_mode)
1671{
Jiri Pirko53901cc2016-02-16 15:14:49 +01001672 return rocker_cmd_exec(rocker_port, false,
Jiri Pirkoe1ba3de2016-02-16 15:14:43 +01001673 rocker_cmd_get_port_settings_prep, NULL,
1674 rocker_cmd_get_port_settings_mode_proc, p_mode);
1675}
1676
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001677static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1678 struct ethtool_cmd *ecmd)
1679{
Jiri Pirko53901cc2016-02-16 15:14:49 +01001680 return rocker_cmd_exec(rocker_port, false,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001681 rocker_cmd_set_port_settings_ethtool_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001682 ecmd, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001683}
1684
1685static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1686 unsigned char *macaddr)
1687{
Jiri Pirko53901cc2016-02-16 15:14:49 +01001688 return rocker_cmd_exec(rocker_port, false,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001689 rocker_cmd_set_port_settings_macaddr_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001690 macaddr, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001691}
1692
Scott Feldman77a58c72015-07-08 16:06:47 -07001693static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1694 int mtu)
1695{
Jiri Pirko53901cc2016-02-16 15:14:49 +01001696 return rocker_cmd_exec(rocker_port, false,
Scott Feldman77a58c72015-07-08 16:06:47 -07001697 rocker_cmd_set_port_settings_mtu_prep,
1698 &mtu, NULL, NULL);
1699}
1700
Scott Feldmanc4f20322015-05-10 09:47:50 -07001701static int rocker_port_set_learning(struct rocker_port *rocker_port,
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001702 bool learning)
Scott Feldman5111f802014-11-28 14:34:30 +01001703{
Jiri Pirko53901cc2016-02-16 15:14:49 +01001704 return rocker_cmd_exec(rocker_port, false,
Scott Feldman5111f802014-11-28 14:34:30 +01001705 rocker_cmd_set_port_learning_prep,
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001706 &learning, NULL, NULL);
Scott Feldman5111f802014-11-28 14:34:30 +01001707}
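/* All of the wrappers above share one rocker_cmd_exec() shape: a prep
 * callback serializes the request into the descriptor, an optional
 * proc callback parses the completion, and priv carries either the
 * setter's input or the getter's result buffer. E.g. (sketch):
 *
 *	unsigned char macaddr[ETH_ALEN];
 *	int err;
 *
 *	err = rocker_cmd_get_port_settings_macaddr(rocker_port, macaddr);
 *	if (!err)
 *		err = rocker_cmd_set_port_settings_macaddr(rocker_port,
 *							   macaddr);
 */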
1708
Jiri Pirkoe4201142016-02-16 15:14:45 +01001709/**********************
1710 * Worlds manipulation
1711 **********************/
1712
1713static struct rocker_world_ops *rocker_world_ops[] = {
1714 &rocker_ofdpa_ops,
1715};
1716
1717#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)
1718
1719static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
1720{
1721 int i;
1722
1723 for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
1724 if (rocker_world_ops[i]->mode == mode)
1725 return rocker_world_ops[i];
1726 return NULL;
1727}
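/* Only the OF-DPA world exists today. A second world would be added
 * by listing its ops in rocker_world_ops[] above, e.g. (hypothetical
 * entry name):
 *
 *	static struct rocker_world_ops *rocker_world_ops[] = {
 *		&rocker_ofdpa_ops,
 *		&rocker_foo_ops,
 *	};
 *
 * rocker_world_ops_find() then picks the entry whose ->mode matches
 * the mode the device reports for the port.
 */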
1728
1729static int rocker_world_init(struct rocker *rocker, u8 mode)
1730{
1731 struct rocker_world_ops *wops;
1732 int err;
1733
1734 wops = rocker_world_ops_find(mode);
1735 if (!wops) {
1736 dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n",
1737 mode);
1738 return -EINVAL;
1739 }
1740 rocker->wops = wops;
1741 rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
1742 if (!rocker->wpriv)
1743 return -ENOMEM;
1744 if (!wops->init)
1745 return 0;
1746 err = wops->init(rocker);
1747 if (err)
1748 kfree(rocker->wpriv);
1749 return err;
1750}
1751
1752static void rocker_world_fini(struct rocker *rocker)
1753{
1754 struct rocker_world_ops *wops = rocker->wops;
1755
1756 if (!wops || !wops->fini)
1757 return;
1758 wops->fini(rocker);
1759 kfree(rocker->wpriv);
1760}
1761
1762static int rocker_world_check_init(struct rocker_port *rocker_port)
1763{
1764 struct rocker *rocker = rocker_port->rocker;
1765 u8 mode;
1766 int err;
1767
1768 err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
1769 if (err) {
1770 dev_err(&rocker->pdev->dev, "failed to get port mode\n");
1771 return err;
1772 }
1773 if (rocker->wops) {
1774 if (rocker->wops->mode != mode) {
1775 dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
 1776			return -EINVAL;
1777 }
1778 return 0;
1779 }
1780 return rocker_world_init(rocker, mode);
1781}
1782
1783static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
1784{
1785 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1786 int err;
1787
1788 rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
1789 if (!rocker_port->wpriv)
1790 return -ENOMEM;
1791 if (!wops->port_pre_init)
1792 return 0;
1793 err = wops->port_pre_init(rocker_port);
1794 if (err)
1795 kfree(rocker_port->wpriv);
 1796	return err;
1797}
1798
1799static int rocker_world_port_init(struct rocker_port *rocker_port)
1800{
1801 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1802
1803 if (!wops->port_init)
1804 return 0;
1805 return wops->port_init(rocker_port);
1806}
1807
1808static void rocker_world_port_fini(struct rocker_port *rocker_port)
1809{
1810 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1811
1812 if (!wops->port_fini)
1813 return;
1814 wops->port_fini(rocker_port);
1815}
1816
1817static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
1818{
1819 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1820
1821 if (!wops->port_post_fini)
1822 return;
1823 wops->port_post_fini(rocker_port);
1824 kfree(rocker_port->wpriv);
1825}
1826
1827static int rocker_world_port_open(struct rocker_port *rocker_port)
1828{
1829 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1830
1831 if (!wops->port_open)
1832 return 0;
1833 return wops->port_open(rocker_port);
1834}
1835
1836static void rocker_world_port_stop(struct rocker_port *rocker_port)
1837{
1838 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1839
1840 if (!wops->port_stop)
1841 return;
1842 wops->port_stop(rocker_port);
1843}
1844
1845static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
1846 u8 state,
1847 struct switchdev_trans *trans)
1848{
1849 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1850
1851 if (!wops->port_attr_stp_state_set)
1852 return 0;
1853 return wops->port_attr_stp_state_set(rocker_port, state, trans);
1854}
1855
1856static int
1857rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
1858 unsigned long brport_flags,
1859 struct switchdev_trans *trans)
1860{
1861 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1862
1863 if (!wops->port_attr_bridge_flags_set)
1864 return 0;
1865 return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
1866 trans);
1867}
1868
1869static int
1870rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
1871 unsigned long *p_brport_flags)
1872{
1873 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1874
1875 if (!wops->port_attr_bridge_flags_get)
1876 return 0;
1877 return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);
1878}
1879
1880static int
1881rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
1882 u32 ageing_time,
1883 struct switchdev_trans *trans)
1885{
1886 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1887
1888 if (!wops->port_attr_bridge_ageing_time_set)
1889 return 0;
1890 return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
1891 trans);
1892}
1893
1894static int
1895rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
1896 const struct switchdev_obj_port_vlan *vlan,
1897 struct switchdev_trans *trans)
1898{
1899 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1900
1901 if (!wops->port_obj_vlan_add)
1902 return 0;
1903 return wops->port_obj_vlan_add(rocker_port, vlan, trans);
1904}
1905
1906static int
1907rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
1908 const struct switchdev_obj_port_vlan *vlan)
1909{
1910 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1911
1912 if (!wops->port_obj_vlan_del)
1913 return 0;
1914 return wops->port_obj_vlan_del(rocker_port, vlan);
1915}
1916
1917static int
1918rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
1919 struct switchdev_obj_port_vlan *vlan,
1920 switchdev_obj_dump_cb_t *cb)
1921{
1922 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1923
1924 if (!wops->port_obj_vlan_dump)
1925 return 0;
1926 return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
1927}
1928
1929static int
1930rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
1931 const struct switchdev_obj_ipv4_fib *fib4,
1932 struct switchdev_trans *trans)
1933{
1934 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1935
1936 if (!wops->port_obj_fib4_add)
1937 return 0;
1938 return wops->port_obj_fib4_add(rocker_port, fib4, trans);
1939}
1940
1941static int
1942rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
1943 const struct switchdev_obj_ipv4_fib *fib4)
1944{
1945 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1946
1947 if (!wops->port_obj_fib4_del)
1948 return 0;
1949 return wops->port_obj_fib4_del(rocker_port, fib4);
1950}
1951
1952static int
1953rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
1954 const struct switchdev_obj_port_fdb *fdb,
1955 struct switchdev_trans *trans)
1956{
1957 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1958
1959 if (!wops->port_obj_fdb_add)
1960 return 0;
1961 return wops->port_obj_fdb_add(rocker_port, fdb, trans);
1962}
1963
1964static int
1965rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
1966 const struct switchdev_obj_port_fdb *fdb)
1967{
1968 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1969
1970 if (!wops->port_obj_fdb_del)
1971 return 0;
1972 return wops->port_obj_fdb_del(rocker_port, fdb);
1973}
1974
1975static int
1976rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
1977 struct switchdev_obj_port_fdb *fdb,
1978 switchdev_obj_dump_cb_t *cb)
1979{
1980 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1981
1982 if (!wops->port_obj_fdb_dump)
1983 return 0;
1984 return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
1985}
1986
1987static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
1988 struct net_device *master)
1989{
1990 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1991
1992 if (!wops->port_master_linked)
1993 return 0;
1994 return wops->port_master_linked(rocker_port, master);
1995}
1996
1997static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
1998 struct net_device *master)
1999{
2000 struct rocker_world_ops *wops = rocker_port->rocker->wops;
2001
2002 if (!wops->port_master_unlinked)
2003 return 0;
2004 return wops->port_master_unlinked(rocker_port, master);
2005}
2006
2007static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
2008 struct neighbour *n)
2009{
2010 struct rocker_world_ops *wops = rocker_port->rocker->wops;
2011
2012 if (!wops->port_neigh_update)
2013 return 0;
2014 return wops->port_neigh_update(rocker_port, n);
2015}
2016
2017static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
2018 struct neighbour *n)
2019{
2020 struct rocker_world_ops *wops = rocker_port->rocker->wops;
2021
2022 if (!wops->port_neigh_destroy)
2023 return 0;
2024 return wops->port_neigh_destroy(rocker_port, n);
2025}
2026
2027static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2028 const unsigned char *addr,
2029 __be16 vlan_id)
2030{
2031 struct rocker_world_ops *wops = rocker_port->rocker->wops;
2032
2033 if (!wops->port_ev_mac_vlan_seen)
2034 return 0;
2035 return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
2036}
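/* Each rocker_world_* wrapper above is the same thin guard: look up
 * the world ops and delegate only if the hook is implemented, so a
 * world may leave most hooks NULL. A minimal sketch, assuming no
 * private state is needed (hypothetical ops, reusing the only mode
 * constant defined today):
 *
 *	static struct rocker_world_ops rocker_foo_ops = {
 *		.kind = "foo",
 *		.priv_size = 0,
 *		.port_priv_size = 0,
 *		.mode = ROCKER_PORT_MODE_OF_DPA,
 *	};
 */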
2037
Simon Hormane5054642015-05-25 14:28:36 +09002038static int
2039rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
2040 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002041{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002042 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2043 entry->key.ig_port.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002044 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002045 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2046 entry->key.ig_port.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002047 return -EMSGSIZE;
2048 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2049 entry->key.ig_port.goto_tbl))
2050 return -EMSGSIZE;
2051
2052 return 0;
2053}
2054
Simon Hormane5054642015-05-25 14:28:36 +09002055static int
2056rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
2057 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002058{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002059 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2060 entry->key.vlan.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002061 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002062 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2063 entry->key.vlan.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002064 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002065 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2066 entry->key.vlan.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002067 return -EMSGSIZE;
2068 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2069 entry->key.vlan.goto_tbl))
2070 return -EMSGSIZE;
2071 if (entry->key.vlan.untagged &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002072 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
2073 entry->key.vlan.new_vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002074 return -EMSGSIZE;
2075
2076 return 0;
2077}
2078
Simon Hormane5054642015-05-25 14:28:36 +09002079static int
2080rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
2081 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002082{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002083 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2084 entry->key.term_mac.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002085 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002086 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2087 entry->key.term_mac.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002088 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002089 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2090 entry->key.term_mac.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002091 return -EMSGSIZE;
2092 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2093 ETH_ALEN, entry->key.term_mac.eth_dst))
2094 return -EMSGSIZE;
2095 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2096 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
2097 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002098 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2099 entry->key.term_mac.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002100 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002101 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2102 entry->key.term_mac.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002103 return -EMSGSIZE;
2104 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2105 entry->key.term_mac.goto_tbl))
2106 return -EMSGSIZE;
2107 if (entry->key.term_mac.copy_to_cpu &&
2108 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2109 entry->key.term_mac.copy_to_cpu))
2110 return -EMSGSIZE;
2111
2112 return 0;
2113}
2114
2115static int
2116rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002117 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002118{
Jiri Pirko9b03c712014-12-03 14:14:53 +01002119 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2120 entry->key.ucast_routing.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002121 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002122 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2123 entry->key.ucast_routing.dst4))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002124 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002125 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2126 entry->key.ucast_routing.dst4_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002127 return -EMSGSIZE;
2128 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2129 entry->key.ucast_routing.goto_tbl))
2130 return -EMSGSIZE;
2131 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2132 entry->key.ucast_routing.group_id))
2133 return -EMSGSIZE;
2134
2135 return 0;
2136}
2137
Simon Hormane5054642015-05-25 14:28:36 +09002138static int
2139rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2140 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002141{
2142 if (entry->key.bridge.has_eth_dst &&
2143 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2144 ETH_ALEN, entry->key.bridge.eth_dst))
2145 return -EMSGSIZE;
2146 if (entry->key.bridge.has_eth_dst_mask &&
2147 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2148 ETH_ALEN, entry->key.bridge.eth_dst_mask))
2149 return -EMSGSIZE;
2150 if (entry->key.bridge.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002151 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2152 entry->key.bridge.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002153 return -EMSGSIZE;
2154 if (entry->key.bridge.tunnel_id &&
2155 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2156 entry->key.bridge.tunnel_id))
2157 return -EMSGSIZE;
2158 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2159 entry->key.bridge.goto_tbl))
2160 return -EMSGSIZE;
2161 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2162 entry->key.bridge.group_id))
2163 return -EMSGSIZE;
2164 if (entry->key.bridge.copy_to_cpu &&
2165 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2166 entry->key.bridge.copy_to_cpu))
2167 return -EMSGSIZE;
2168
2169 return 0;
2170}
2171
Simon Hormane5054642015-05-25 14:28:36 +09002172static int
2173rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2174 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002175{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002176 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2177 entry->key.acl.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002178 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002179 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2180 entry->key.acl.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002181 return -EMSGSIZE;
2182 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2183 ETH_ALEN, entry->key.acl.eth_src))
2184 return -EMSGSIZE;
2185 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2186 ETH_ALEN, entry->key.acl.eth_src_mask))
2187 return -EMSGSIZE;
2188 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2189 ETH_ALEN, entry->key.acl.eth_dst))
2190 return -EMSGSIZE;
2191 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2192 ETH_ALEN, entry->key.acl.eth_dst_mask))
2193 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002194 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2195 entry->key.acl.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002196 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002197 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2198 entry->key.acl.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002199 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002200 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2201 entry->key.acl.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002202 return -EMSGSIZE;
2203
2204 switch (ntohs(entry->key.acl.eth_type)) {
2205 case ETH_P_IP:
2206 case ETH_P_IPV6:
2207 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2208 entry->key.acl.ip_proto))
2209 return -EMSGSIZE;
2210 if (rocker_tlv_put_u8(desc_info,
2211 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2212 entry->key.acl.ip_proto_mask))
2213 return -EMSGSIZE;
2214 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2215 entry->key.acl.ip_tos & 0x3f))
2216 return -EMSGSIZE;
2217 if (rocker_tlv_put_u8(desc_info,
2218 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2219 entry->key.acl.ip_tos_mask & 0x3f))
2220 return -EMSGSIZE;
2221 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2222 (entry->key.acl.ip_tos & 0xc0) >> 6))
2223 return -EMSGSIZE;
2224 if (rocker_tlv_put_u8(desc_info,
2225 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2226 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
2227 return -EMSGSIZE;
2228 break;
2229 }
2230
2231 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2232 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2233 entry->key.acl.group_id))
2234 return -EMSGSIZE;
2235
2236 return 0;
2237}
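/* Note how the ACL key splits ip_tos into two TLVs: the low six bits
 * are sent as ROCKER_TLV_OF_DPA_IP_DSCP and the top two bits, shifted
 * down, as ROCKER_TLV_OF_DPA_IP_ECN. For example, ip_tos = 0xb8 goes
 * out as DSCP 0x38 and ECN 0x2; the masks are split the same way.
 */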
2238
Simon Horman534ba6a2015-06-01 13:25:04 +09002239static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002240 struct rocker_desc_info *desc_info,
2241 void *priv)
2242{
Simon Hormane5054642015-05-25 14:28:36 +09002243 const struct rocker_flow_tbl_entry *entry = priv;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002244 struct rocker_tlv *cmd_info;
2245 int err = 0;
2246
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002247 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002248 return -EMSGSIZE;
2249 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2250 if (!cmd_info)
2251 return -EMSGSIZE;
2252 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2253 entry->key.tbl_id))
2254 return -EMSGSIZE;
2255 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2256 entry->key.priority))
2257 return -EMSGSIZE;
2258 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2259 return -EMSGSIZE;
2260 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2261 entry->cookie))
2262 return -EMSGSIZE;
2263
2264 switch (entry->key.tbl_id) {
2265 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2266 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2267 break;
2268 case ROCKER_OF_DPA_TABLE_ID_VLAN:
2269 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2270 break;
2271 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2272 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2273 break;
2274 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2275 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2276 break;
2277 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2278 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2279 break;
2280 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2281 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2282 break;
2283 default:
2284 err = -ENOTSUPP;
2285 break;
2286 }
2287
2288 if (err)
2289 return err;
2290
2291 rocker_tlv_nest_end(desc_info, cmd_info);
2292
2293 return 0;
2294}
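/* Putting it together, a VLAN-table flow add built by the callback
 * above nests like this (sketch; the tail comes from
 * rocker_cmd_flow_tbl_add_vlan()):
 *
 *   ROCKER_TLV_CMD_TYPE (u16) = OF_DPA_FLOW_ADD
 *   ROCKER_TLV_CMD_INFO (nest)
 *     ROCKER_TLV_OF_DPA_TABLE_ID      (u16)
 *     ROCKER_TLV_OF_DPA_PRIORITY      (u32)
 *     ROCKER_TLV_OF_DPA_HARDTIME      (u32)
 *     ROCKER_TLV_OF_DPA_COOKIE        (u64)
 *     ROCKER_TLV_OF_DPA_IN_PPORT      (u32)
 *     ROCKER_TLV_OF_DPA_VLAN_ID       (be16)
 *     ROCKER_TLV_OF_DPA_VLAN_ID_MASK  (be16)
 *     ROCKER_TLV_OF_DPA_GOTO_TABLE_ID (u16)
 */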
2295
Simon Horman534ba6a2015-06-01 13:25:04 +09002296static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002297 struct rocker_desc_info *desc_info,
2298 void *priv)
2299{
2300 const struct rocker_flow_tbl_entry *entry = priv;
2301 struct rocker_tlv *cmd_info;
2302
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002303 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002304 return -EMSGSIZE;
2305 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2306 if (!cmd_info)
2307 return -EMSGSIZE;
2308 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2309 entry->cookie))
2310 return -EMSGSIZE;
2311 rocker_tlv_nest_end(desc_info, cmd_info);
2312
2313 return 0;
2314}
2315
2316static int
2317rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
 2318				      const struct rocker_group_tbl_entry *entry)
2319{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002320 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002321 ROCKER_GROUP_PORT_GET(entry->group_id)))
2322 return -EMSGSIZE;
2323 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2324 entry->l2_interface.pop_vlan))
2325 return -EMSGSIZE;
2326
2327 return 0;
2328}
2329
2330static int
2331rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002332 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002333{
2334 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2335 entry->l2_rewrite.group_id))
2336 return -EMSGSIZE;
2337 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2338 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2339 ETH_ALEN, entry->l2_rewrite.eth_src))
2340 return -EMSGSIZE;
2341 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2342 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2343 ETH_ALEN, entry->l2_rewrite.eth_dst))
2344 return -EMSGSIZE;
2345 if (entry->l2_rewrite.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002346 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2347 entry->l2_rewrite.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002348 return -EMSGSIZE;
2349
2350 return 0;
2351}
2352
2353static int
2354rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002355 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002356{
2357 int i;
2358 struct rocker_tlv *group_ids;
2359
2360 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2361 entry->group_count))
2362 return -EMSGSIZE;
2363
2364 group_ids = rocker_tlv_nest_start(desc_info,
2365 ROCKER_TLV_OF_DPA_GROUP_IDS);
2366 if (!group_ids)
2367 return -EMSGSIZE;
2368
2369 for (i = 0; i < entry->group_count; i++)
2370 /* Note TLV array is 1-based */
2371 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2372 return -EMSGSIZE;
2373
2374 rocker_tlv_nest_end(desc_info, group_ids);
2375
2376 return 0;
2377}
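/* Group-ID lists use the nest member's 1-based array index as its TLV
 * type instead of a fixed attribute, so a two-port flood group is
 * roughly:
 *
 *   ROCKER_TLV_OF_DPA_GROUP_COUNT (u16) = 2
 *   ROCKER_TLV_OF_DPA_GROUP_IDS (nest)
 *     [1] (u32) = group_ids[0]
 *     [2] (u32) = group_ids[1]
 */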
2378
2379static int
2380rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002381 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002382{
2383 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2384 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2385 ETH_ALEN, entry->l3_unicast.eth_src))
2386 return -EMSGSIZE;
2387 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2388 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2389 ETH_ALEN, entry->l3_unicast.eth_dst))
2390 return -EMSGSIZE;
2391 if (entry->l3_unicast.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002392 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2393 entry->l3_unicast.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002394 return -EMSGSIZE;
2395 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2396 entry->l3_unicast.ttl_check))
2397 return -EMSGSIZE;
2398 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2399 entry->l3_unicast.group_id))
2400 return -EMSGSIZE;
2401
2402 return 0;
2403}
2404
Simon Horman534ba6a2015-06-01 13:25:04 +09002405static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002406 struct rocker_desc_info *desc_info,
2407 void *priv)
2408{
2409 struct rocker_group_tbl_entry *entry = priv;
2410 struct rocker_tlv *cmd_info;
2411 int err = 0;
2412
2413 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2414 return -EMSGSIZE;
2415 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2416 if (!cmd_info)
2417 return -EMSGSIZE;
2418
2419 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2420 entry->group_id))
2421 return -EMSGSIZE;
2422
2423 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2424 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2425 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2426 break;
2427 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2428 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2429 break;
2430 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2431 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2432 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2433 break;
2434 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2435 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2436 break;
2437 default:
2438 err = -ENOTSUPP;
2439 break;
2440 }
2441
2442 if (err)
2443 return err;
2444
2445 rocker_tlv_nest_end(desc_info, cmd_info);
2446
2447 return 0;
2448}
2449
Simon Horman534ba6a2015-06-01 13:25:04 +09002450static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002451 struct rocker_desc_info *desc_info,
2452 void *priv)
2453{
2454 const struct rocker_group_tbl_entry *entry = priv;
2455 struct rocker_tlv *cmd_info;
2456
2457 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2458 return -EMSGSIZE;
2459 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2460 if (!cmd_info)
2461 return -EMSGSIZE;
2462 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2463 entry->group_id))
2464 return -EMSGSIZE;
2465 rocker_tlv_nest_end(desc_info, cmd_info);
2466
2467 return 0;
2468}
2469
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002470/***************************************************
2471 * Flow, group, FDB, internal VLAN and neigh tables
2472 ***************************************************/
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002473
2474static int rocker_init_tbls(struct rocker *rocker)
2475{
2476 hash_init(rocker->flow_tbl);
2477 spin_lock_init(&rocker->flow_tbl_lock);
2478
2479 hash_init(rocker->group_tbl);
2480 spin_lock_init(&rocker->group_tbl_lock);
2481
2482 hash_init(rocker->fdb_tbl);
2483 spin_lock_init(&rocker->fdb_tbl_lock);
2484
2485 hash_init(rocker->internal_vlan_tbl);
2486 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2487
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002488 hash_init(rocker->neigh_tbl);
2489 spin_lock_init(&rocker->neigh_tbl_lock);
2490
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002491 return 0;
2492}
2493
2494static void rocker_free_tbls(struct rocker *rocker)
2495{
2496 unsigned long flags;
2497 struct rocker_flow_tbl_entry *flow_entry;
2498 struct rocker_group_tbl_entry *group_entry;
2499 struct rocker_fdb_tbl_entry *fdb_entry;
2500 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002501 struct rocker_neigh_tbl_entry *neigh_entry;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002502 struct hlist_node *tmp;
2503 int bkt;
2504
2505 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2506 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2507 hash_del(&flow_entry->entry);
2508 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2509
2510 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2511 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2512 hash_del(&group_entry->entry);
2513 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2514
2515 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2516 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2517 hash_del(&fdb_entry->entry);
2518 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2519
2520 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2521 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2522 tmp, internal_vlan_entry, entry)
2523 hash_del(&internal_vlan_entry->entry);
2524 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002525
2526 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2527 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2528 hash_del(&neigh_entry->entry);
2529 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002530}
2531
2532static struct rocker_flow_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002533rocker_flow_tbl_find(const struct rocker *rocker,
2534 const struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002535{
2536 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002537 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002538
2539 hash_for_each_possible(rocker->flow_tbl, found,
2540 entry, match->key_crc32) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002541 if (memcmp(&found->key, &match->key, key_len) == 0)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002542 return found;
2543 }
2544
2545 return NULL;
2546}
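/* Flow entries hash on a CRC-32 of the key, and a lookup is a bucket
 * walk plus memcmp() of the raw key bytes to reject collisions.
 * key_len lets entries with a meaningful prefix (e.g. unicast routing,
 * whose key stops before group_id) hash and compare only that prefix.
 * Callers compute the crc before searching:
 *
 *	match->key_crc32 = crc32(~0, &match->key, key_len);
 */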
2547
2548static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002549 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002550 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002551{
2552 struct rocker *rocker = rocker_port->rocker;
2553 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002554 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002555 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002556
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002557 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002558
Scott Feldman179f9a22015-06-12 21:35:46 -07002559 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002560
2561 found = rocker_flow_tbl_find(rocker, match);
2562
2563 if (found) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002564 match->cookie = found->cookie;
Jiri Pirko76c6f942015-09-24 10:02:44 +02002565 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002566 hash_del(&found->entry);
Jiri Pirkob15edf82016-02-16 15:14:39 +01002567 rocker_kfree(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002568 found = match;
2569 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002570 } else {
2571 found = match;
2572 found->cookie = rocker->flow_tbl_next_cookie++;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002573 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002574 }
2575
Jiri Pirko76c6f942015-09-24 10:02:44 +02002576 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002577 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002578
Scott Feldman179f9a22015-06-12 21:35:46 -07002579 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002580
Jiri Pirkoae3907e2016-02-16 15:14:48 +01002581 if (!switchdev_trans_ph_prepare(trans))
Jiri Pirko53901cc2016-02-16 15:14:49 +01002582 return rocker_cmd_exec(rocker_port,
2583 rocker_flags_nowait(flags),
Jiri Pirkoae3907e2016-02-16 15:14:48 +01002584 rocker_cmd_flow_tbl_add,
2585 found, NULL, NULL);
2586 return 0;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002587}
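/* The switchdev two-phase model shows up throughout this file: during
 * the prepare phase (switchdev_trans_ph_prepare(trans) is true) the
 * add above only allocates and validates, so it can fail cleanly; the
 * hash-table update and the OF_DPA_FLOW_ADD/_MOD command are deferred
 * to the commit phase, which is not allowed to fail.
 */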
2588
2589static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002590 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002591 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002592{
2593 struct rocker *rocker = rocker_port->rocker;
2594 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002595 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002596 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002597 int err = 0;
2598
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002599 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002600
Scott Feldman179f9a22015-06-12 21:35:46 -07002601 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002602
2603 found = rocker_flow_tbl_find(rocker, match);
2604
2605 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002606 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002607 hash_del(&found->entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002608 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002609 }
2610
Scott Feldman179f9a22015-06-12 21:35:46 -07002611 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002612
Jiri Pirkob15edf82016-02-16 15:14:39 +01002613 rocker_kfree(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002614
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002615 if (found) {
Jiri Pirkoae3907e2016-02-16 15:14:48 +01002616 if (!switchdev_trans_ph_prepare(trans))
Jiri Pirko53901cc2016-02-16 15:14:49 +01002617 err = rocker_cmd_exec(rocker_port,
2618 rocker_flags_nowait(flags),
Jiri Pirkoae3907e2016-02-16 15:14:48 +01002619 rocker_cmd_flow_tbl_del,
2620 found, NULL, NULL);
Jiri Pirkob15edf82016-02-16 15:14:39 +01002621 rocker_kfree(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002622 }
2623
2624 return err;
2625}
2626
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002627static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002628 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002629 struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002630{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002631 if (flags & ROCKER_OP_FLAG_REMOVE)
Jiri Pirko76c6f942015-09-24 10:02:44 +02002632 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002633 else
Jiri Pirko76c6f942015-09-24 10:02:44 +02002634 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002635}
2636
2637static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002638 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002639 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002640 enum rocker_of_dpa_table_id goto_tbl)
2641{
2642 struct rocker_flow_tbl_entry *entry;
2643
Jiri Pirkob15edf82016-02-16 15:14:39 +01002644 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002645 if (!entry)
2646 return -ENOMEM;
2647
2648 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2649 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002650 entry->key.ig_port.in_pport = in_pport;
2651 entry->key.ig_port.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002652 entry->key.ig_port.goto_tbl = goto_tbl;
2653
Jiri Pirko76c6f942015-09-24 10:02:44 +02002654 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002655}
2656
2657static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002658 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002659 u32 in_pport, __be16 vlan_id,
2660 __be16 vlan_id_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002661 enum rocker_of_dpa_table_id goto_tbl,
2662 bool untagged, __be16 new_vlan_id)
2663{
2664 struct rocker_flow_tbl_entry *entry;
2665
Jiri Pirkob15edf82016-02-16 15:14:39 +01002666 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002667 if (!entry)
2668 return -ENOMEM;
2669
2670 entry->key.priority = ROCKER_PRIORITY_VLAN;
2671 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002672 entry->key.vlan.in_pport = in_pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002673 entry->key.vlan.vlan_id = vlan_id;
2674 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2675 entry->key.vlan.goto_tbl = goto_tbl;
2676
2677 entry->key.vlan.untagged = untagged;
2678 entry->key.vlan.new_vlan_id = new_vlan_id;
2679
Jiri Pirko76c6f942015-09-24 10:02:44 +02002680 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002681}
2682
2683static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002684 struct switchdev_trans *trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002685 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002686 __be16 eth_type, const u8 *eth_dst,
2687 const u8 *eth_dst_mask, __be16 vlan_id,
2688 __be16 vlan_id_mask, bool copy_to_cpu,
2689 int flags)
2690{
2691 struct rocker_flow_tbl_entry *entry;
2692
Jiri Pirkob15edf82016-02-16 15:14:39 +01002693 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002694 if (!entry)
2695 return -ENOMEM;
2696
2697 if (is_multicast_ether_addr(eth_dst)) {
2698 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2699 entry->key.term_mac.goto_tbl =
2700 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2701 } else {
2702 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2703 entry->key.term_mac.goto_tbl =
2704 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2705 }
2706
2707 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002708 entry->key.term_mac.in_pport = in_pport;
2709 entry->key.term_mac.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002710 entry->key.term_mac.eth_type = eth_type;
2711 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2712 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2713 entry->key.term_mac.vlan_id = vlan_id;
2714 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2715 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2716
Jiri Pirko76c6f942015-09-24 10:02:44 +02002717 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002718}
2719
2720static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002721 struct switchdev_trans *trans, int flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002722 const u8 *eth_dst, const u8 *eth_dst_mask,
2723 __be16 vlan_id, u32 tunnel_id,
2724 enum rocker_of_dpa_table_id goto_tbl,
2725 u32 group_id, bool copy_to_cpu)
2726{
2727 struct rocker_flow_tbl_entry *entry;
2728 u32 priority;
2729 bool vlan_bridging = !!vlan_id;
 2730	bool dflt = !eth_dst || eth_dst_mask;
2731 bool wild = false;
2732
Jiri Pirkob15edf82016-02-16 15:14:39 +01002733 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002734 if (!entry)
2735 return -ENOMEM;
2736
2737 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2738
2739 if (eth_dst) {
2740 entry->key.bridge.has_eth_dst = 1;
2741 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2742 }
2743 if (eth_dst_mask) {
2744 entry->key.bridge.has_eth_dst_mask = 1;
2745 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
Simon Horman629161f2015-04-30 15:21:29 +09002746 if (!ether_addr_equal(eth_dst_mask, ff_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002747 wild = true;
2748 }
2749
2750 priority = ROCKER_PRIORITY_UNKNOWN;
Thomas Graf51ace882014-11-28 14:34:32 +01002751 if (vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002752 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002753 else if (vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002754 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002755 else if (vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002756 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
Thomas Graf51ace882014-11-28 14:34:32 +01002757 else if (!vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002758 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002759 else if (!vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002760 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002761 else if (!vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002762 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2763
2764 entry->key.priority = priority;
2765 entry->key.bridge.vlan_id = vlan_id;
2766 entry->key.bridge.tunnel_id = tunnel_id;
2767 entry->key.bridge.goto_tbl = goto_tbl;
2768 entry->key.bridge.group_id = group_id;
2769 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2770
Jiri Pirko76c6f942015-09-24 10:02:44 +02002771 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002772}
2773
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002774static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002775 struct switchdev_trans *trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002776 __be16 eth_type, __be32 dst,
2777 __be32 dst_mask, u32 priority,
2778 enum rocker_of_dpa_table_id goto_tbl,
2779 u32 group_id, int flags)
2780{
2781 struct rocker_flow_tbl_entry *entry;
2782
Jiri Pirkob15edf82016-02-16 15:14:39 +01002783 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002784 if (!entry)
2785 return -ENOMEM;
2786
2787 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2788 entry->key.priority = priority;
2789 entry->key.ucast_routing.eth_type = eth_type;
2790 entry->key.ucast_routing.dst4 = dst;
2791 entry->key.ucast_routing.dst4_mask = dst_mask;
2792 entry->key.ucast_routing.goto_tbl = goto_tbl;
2793 entry->key.ucast_routing.group_id = group_id;
2794 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2795 ucast_routing.group_id);
2796
Jiri Pirko76c6f942015-09-24 10:02:44 +02002797 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002798}
2799
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002800static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002801 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002802 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002803 const u8 *eth_src, const u8 *eth_src_mask,
2804 const u8 *eth_dst, const u8 *eth_dst_mask,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002805 __be16 eth_type, __be16 vlan_id,
2806 __be16 vlan_id_mask, u8 ip_proto,
2807 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002808 u32 group_id)
2809{
2810 u32 priority;
2811 struct rocker_flow_tbl_entry *entry;
2812
Jiri Pirkob15edf82016-02-16 15:14:39 +01002813 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002814 if (!entry)
2815 return -ENOMEM;
2816
2817 priority = ROCKER_PRIORITY_ACL_NORMAL;
2818 if (eth_dst && eth_dst_mask) {
Simon Horman629161f2015-04-30 15:21:29 +09002819 if (ether_addr_equal(eth_dst_mask, mcast_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002820 priority = ROCKER_PRIORITY_ACL_DFLT;
2821 else if (is_link_local_ether_addr(eth_dst))
2822 priority = ROCKER_PRIORITY_ACL_CTRL;
2823 }
2824
2825 entry->key.priority = priority;
2826 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002827 entry->key.acl.in_pport = in_pport;
2828 entry->key.acl.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002829
2830 if (eth_src)
2831 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2832 if (eth_src_mask)
2833 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2834 if (eth_dst)
2835 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2836 if (eth_dst_mask)
2837 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2838
2839 entry->key.acl.eth_type = eth_type;
2840 entry->key.acl.vlan_id = vlan_id;
2841 entry->key.acl.vlan_id_mask = vlan_id_mask;
2842 entry->key.acl.ip_proto = ip_proto;
2843 entry->key.acl.ip_proto_mask = ip_proto_mask;
2844 entry->key.acl.ip_tos = ip_tos;
2845 entry->key.acl.ip_tos_mask = ip_tos_mask;
2846 entry->key.acl.group_id = group_id;
2847
Jiri Pirko76c6f942015-09-24 10:02:44 +02002848 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002849}
2850
2851static struct rocker_group_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002852rocker_group_tbl_find(const struct rocker *rocker,
2853 const struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002854{
2855 struct rocker_group_tbl_entry *found;
2856
2857 hash_for_each_possible(rocker->group_tbl, found,
2858 entry, match->group_id) {
2859 if (found->group_id == match->group_id)
2860 return found;
2861 }
2862
2863 return NULL;
2864}
2865
Jiri Pirko76c6f942015-09-24 10:02:44 +02002866static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002867 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002868{
2869 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2870 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2871 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
Jiri Pirkob15edf82016-02-16 15:14:39 +01002872 rocker_kfree(trans, entry->group_ids);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002873 break;
2874 default:
2875 break;
2876 }
Jiri Pirkob15edf82016-02-16 15:14:39 +01002877 rocker_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002878}
2879
2880static int rocker_group_tbl_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002881 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002882 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002883{
2884 struct rocker *rocker = rocker_port->rocker;
2885 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002886 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002887
Scott Feldman179f9a22015-06-12 21:35:46 -07002888 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002889
2890 found = rocker_group_tbl_find(rocker, match);
2891
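	/* An existing entry with the same group_id is replaced in
	 * place: the old software entry is freed and the new one is
	 * pushed to the device as a GROUP_MOD rather than a GROUP_ADD.
	 * The hash table is left untouched during a transaction's
	 * prepare phase.
	 */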
2892 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002893 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002894 hash_del(&found->entry);
Jiri Pirko76c6f942015-09-24 10:02:44 +02002895 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002896 found = match;
2897 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2898 } else {
2899 found = match;
2900 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2901 }
2902
Jiri Pirko76c6f942015-09-24 10:02:44 +02002903 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002904 hash_add(rocker->group_tbl, &found->entry, found->group_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002905
Scott Feldman179f9a22015-06-12 21:35:46 -07002906 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002907
Jiri Pirkoae3907e2016-02-16 15:14:48 +01002908 if (!switchdev_trans_ph_prepare(trans))
Jiri Pirko53901cc2016-02-16 15:14:49 +01002909 return rocker_cmd_exec(rocker_port,
2910 rocker_flags_nowait(flags),
Jiri Pirkoae3907e2016-02-16 15:14:48 +01002911 rocker_cmd_group_tbl_add,
2912 found, NULL, NULL);
2913 return 0;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002914}
2915
2916static int rocker_group_tbl_del(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002917 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002918 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002919{
2920 struct rocker *rocker = rocker_port->rocker;
2921 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002922 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002923 int err = 0;
2924
Scott Feldman179f9a22015-06-12 21:35:46 -07002925 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002926
2927 found = rocker_group_tbl_find(rocker, match);
2928
2929 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002930 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002931 hash_del(&found->entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002932 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2933 }
2934
Scott Feldman179f9a22015-06-12 21:35:46 -07002935 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002936
Jiri Pirko76c6f942015-09-24 10:02:44 +02002937 rocker_group_tbl_entry_free(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002938
2939 if (found) {
Jiri Pirkoae3907e2016-02-16 15:14:48 +01002940 if (!switchdev_trans_ph_prepare(trans))
Jiri Pirko53901cc2016-02-16 15:14:49 +01002941 err = rocker_cmd_exec(rocker_port,
2942 rocker_flags_nowait(flags),
Jiri Pirkoae3907e2016-02-16 15:14:48 +01002943 rocker_cmd_group_tbl_del,
2944 found, NULL, NULL);
Jiri Pirko76c6f942015-09-24 10:02:44 +02002945 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002946 }
2947
2948 return err;
2949}
2950
2951static int rocker_group_tbl_do(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002952 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002953 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002954{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002955 if (flags & ROCKER_OP_FLAG_REMOVE)
Jiri Pirko76c6f942015-09-24 10:02:44 +02002956 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002957 else
Jiri Pirko76c6f942015-09-24 10:02:44 +02002958 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002959}
2960
2961static int rocker_group_l2_interface(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002962 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002963 __be16 vlan_id, u32 out_pport,
2964 int pop_vlan)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002965{
2966 struct rocker_group_tbl_entry *entry;
2967
Jiri Pirkob15edf82016-02-16 15:14:39 +01002968 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002969 if (!entry)
2970 return -ENOMEM;
2971
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002972 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002973 entry->l2_interface.pop_vlan = pop_vlan;
2974
Jiri Pirko76c6f942015-09-24 10:02:44 +02002975 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002976}
2977
2978static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002979 struct switchdev_trans *trans,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002980 int flags, u8 group_count,
Simon Hormane5054642015-05-25 14:28:36 +09002981 const u32 *group_ids, u32 group_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002982{
2983 struct rocker_group_tbl_entry *entry;
2984
Jiri Pirkob15edf82016-02-16 15:14:39 +01002985 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002986 if (!entry)
2987 return -ENOMEM;
2988
2989 entry->group_id = group_id;
2990 entry->group_count = group_count;
2991
Jiri Pirkob15edf82016-02-16 15:14:39 +01002992 entry->group_ids = rocker_kcalloc(trans, flags,
2993 group_count, sizeof(u32));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002994 if (!entry->group_ids) {
Jiri Pirkob15edf82016-02-16 15:14:39 +01002995 rocker_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002996 return -ENOMEM;
2997 }
2998 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2999
Jiri Pirko76c6f942015-09-24 10:02:44 +02003000 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003001}
3002
3003static int rocker_group_l2_flood(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003004 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003005 __be16 vlan_id, u8 group_count,
Simon Hormane5054642015-05-25 14:28:36 +09003006 const u32 *group_ids, u32 group_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003007{
Jiri Pirko76c6f942015-09-24 10:02:44 +02003008 return rocker_group_l2_fan_out(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003009 group_count, group_ids,
3010 group_id);
3011}
3012
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003013static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003014 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003015 u32 index, const u8 *src_mac, const u8 *dst_mac,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003016 __be16 vlan_id, bool ttl_check, u32 pport)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003017{
3018 struct rocker_group_tbl_entry *entry;
3019
Jiri Pirkob15edf82016-02-16 15:14:39 +01003020 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003021 if (!entry)
3022 return -ENOMEM;
3023
3024 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
3025 if (src_mac)
3026 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
3027 if (dst_mac)
3028 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
3029 entry->l3_unicast.vlan_id = vlan_id;
3030 entry->l3_unicast.ttl_check = ttl_check;
3031 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
3032
Jiri Pirko76c6f942015-09-24 10:02:44 +02003033 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003034}
3035
3036static struct rocker_neigh_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09003037rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003038{
3039 struct rocker_neigh_tbl_entry *found;
3040
Scott Feldman0f43deb2015-03-06 15:54:51 -08003041 hash_for_each_possible(rocker->neigh_tbl, found,
3042 entry, be32_to_cpu(ip_addr))
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003043 if (found->ip_addr == ip_addr)
3044 return found;
3045
3046 return NULL;
3047}
3048
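/* Neighbor entries are reference counted: the /32 route to the
 * neighbor and any nexthops resolving through it each hold a ref.
 * The L3 unicast group index is allocated outside the commit phase;
 * commit reuses the entry (and index) queued while preparing, while
 * hashing and ref counting are skipped during prepare.
 */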
3049static void _rocker_neigh_add(struct rocker *rocker,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003050 struct switchdev_trans *trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003051 struct rocker_neigh_tbl_entry *entry)
3052{
Jiri Pirko76c6f942015-09-24 10:02:44 +02003053 if (!switchdev_trans_ph_commit(trans))
Scott Feldman4d81db42015-06-12 21:24:40 -07003054 entry->index = rocker->neigh_tbl_next_index++;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003055 if (switchdev_trans_ph_prepare(trans))
Simon Horman550ecc92015-05-21 12:40:16 +09003056 return;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003057 entry->ref_count++;
Scott Feldman0f43deb2015-03-06 15:54:51 -08003058 hash_add(rocker->neigh_tbl, &entry->entry,
3059 be32_to_cpu(entry->ip_addr));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003060}
3061
Jiri Pirko76c6f942015-09-24 10:02:44 +02003062static void _rocker_neigh_del(struct switchdev_trans *trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003063 struct rocker_neigh_tbl_entry *entry)
3064{
Jiri Pirko76c6f942015-09-24 10:02:44 +02003065 if (switchdev_trans_ph_prepare(trans))
Simon Horman550ecc92015-05-21 12:40:16 +09003066 return;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003067 if (--entry->ref_count == 0) {
3068 hash_del(&entry->entry);
Jiri Pirkob15edf82016-02-16 15:14:39 +01003069 rocker_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003070 }
3071}
3072
Scott Feldmanc4f20322015-05-10 09:47:50 -07003073static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003074 struct switchdev_trans *trans,
Simon Hormane5054642015-05-25 14:28:36 +09003075 const u8 *eth_dst, bool ttl_check)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003076{
3077 if (eth_dst) {
3078 ether_addr_copy(entry->eth_dst, eth_dst);
3079 entry->ttl_check = ttl_check;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003080 } else if (!switchdev_trans_ph_prepare(trans)) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003081 entry->ref_count++;
3082 }
3083}
3084
3085static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003086 struct switchdev_trans *trans,
Simon Hormane5054642015-05-25 14:28:36 +09003087 int flags, __be32 ip_addr, const u8 *eth_dst)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003088{
3089 struct rocker *rocker = rocker_port->rocker;
3090 struct rocker_neigh_tbl_entry *entry;
3091 struct rocker_neigh_tbl_entry *found;
3092 unsigned long lock_flags;
3093 __be16 eth_type = htons(ETH_P_IP);
3094 enum rocker_of_dpa_table_id goto_tbl =
3095 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3096 u32 group_id;
3097 u32 priority = 0;
3098 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3099 bool updating;
3100 bool removing;
3101 int err = 0;
3102
Jiri Pirkob15edf82016-02-16 15:14:39 +01003103 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003104 if (!entry)
3105 return -ENOMEM;
3106
3107 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3108
3109 found = rocker_neigh_tbl_find(rocker, ip_addr);
3110
3111 updating = found && adding;
3112 removing = found && !adding;
3113 adding = !found && adding;
3114
3115 if (adding) {
3116 entry->ip_addr = ip_addr;
3117 entry->dev = rocker_port->dev;
3118 ether_addr_copy(entry->eth_dst, eth_dst);
3119 entry->ttl_check = true;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003120 _rocker_neigh_add(rocker, trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003121 } else if (removing) {
3122 memcpy(entry, found, sizeof(*entry));
Jiri Pirko76c6f942015-09-24 10:02:44 +02003123 _rocker_neigh_del(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003124 } else if (updating) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003125 _rocker_neigh_update(found, trans, eth_dst, true);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003126 memcpy(entry, found, sizeof(*entry));
3127 } else {
3128 err = -ENOENT;
3129 }
3130
3131 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3132
3133 if (err)
3134 goto err_out;
3135
3136 /* For each active neighbor, we have an L3 unicast group and
3137 * a /32 route to the neighbor, which uses the L3 unicast
3138 * group. The L3 unicast group can also be referred to by
3139 * other routes' nexthops.
3140 */
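	/* e.g. a (hypothetical) neighbor 10.1.1.2 on this port gets an
	 * L3 unicast group rewriting src/dst MACs plus a 10.1.1.2/32
	 * ucast routing entry pointing at that group.
	 */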
3141
Jiri Pirko76c6f942015-09-24 10:02:44 +02003142 err = rocker_group_l3_unicast(rocker_port, trans, flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003143 entry->index,
3144 rocker_port->dev->dev_addr,
3145 entry->eth_dst,
3146 rocker_port->internal_vlan_id,
3147 entry->ttl_check,
3148 rocker_port->pport);
3149 if (err) {
3150 netdev_err(rocker_port->dev,
3151 "Error (%d) L3 unicast group index %d\n",
3152 err, entry->index);
3153 goto err_out;
3154 }
3155
3156 if (adding || removing) {
3157 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003158 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003159 eth_type, ip_addr,
3160 inet_make_mask(32),
3161 priority, goto_tbl,
3162 group_id, flags);
3163
3164 if (err)
3165 netdev_err(rocker_port->dev,
3166 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3167 err, &entry->ip_addr, group_id);
3168 }
3169
3170err_out:
3171 if (!adding)
Jiri Pirkob15edf82016-02-16 15:14:39 +01003172 rocker_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003173
3174 return err;
3175}
3176
3177static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003178 struct switchdev_trans *trans,
3179 __be32 ip_addr)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003180{
3181 struct net_device *dev = rocker_port->dev;
Scott Feldman0f43deb2015-03-06 15:54:51 -08003182 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003183 int err = 0;
3184
Ying Xue4133fc02015-05-15 12:53:21 +08003185 if (!n) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003186 n = neigh_create(&arp_tbl, &ip_addr, dev);
Ying Xue4133fc02015-05-15 12:53:21 +08003187 if (IS_ERR(n))
3188			return PTR_ERR(n);
3189 }
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003190
3191 /* If the neigh is already resolved, then go ahead and
3192 * install the entry, otherwise start the ARP process to
3193 * resolve the neigh.
3194 */
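	/* Once ARP answers, the driver's neigh-update netevent handler
	 * is expected to call back into rocker_port_ipv4_neigh() to
	 * install the entry.
	 */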
3195
3196 if (n->nud_state & NUD_VALID)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003197 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003198 ip_addr, n->ha);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003199 else
3200 neigh_event_send(n, NULL);
3201
Ying Xue4133fc02015-05-15 12:53:21 +08003202 neigh_release(n);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003203 return err;
3204}
3205
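/* Resolve an IPv4 nexthop to an L3 unicast group index: create or
 * ref the neighbor entry for ip_addr, hand back its group index via
 * *index, and kick off ARP resolution if the MAC isn't known yet.
 */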
Scott Feldmanc4f20322015-05-10 09:47:50 -07003206static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003207 struct switchdev_trans *trans, int flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003208 __be32 ip_addr, u32 *index)
3209{
3210 struct rocker *rocker = rocker_port->rocker;
3211 struct rocker_neigh_tbl_entry *entry;
3212 struct rocker_neigh_tbl_entry *found;
3213 unsigned long lock_flags;
3214 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3215 bool updating;
3216 bool removing;
3217 bool resolved = true;
3218 int err = 0;
3219
Jiri Pirkob15edf82016-02-16 15:14:39 +01003220 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003221 if (!entry)
3222 return -ENOMEM;
3223
3224 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3225
3226 found = rocker_neigh_tbl_find(rocker, ip_addr);
3227 if (found)
3228 *index = found->index;
3229
3230 updating = found && adding;
3231 removing = found && !adding;
3232 adding = !found && adding;
3233
3234 if (adding) {
3235 entry->ip_addr = ip_addr;
3236 entry->dev = rocker_port->dev;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003237 _rocker_neigh_add(rocker, trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003238 *index = entry->index;
3239 resolved = false;
3240 } else if (removing) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003241 _rocker_neigh_del(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003242 } else if (updating) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003243 _rocker_neigh_update(found, trans, NULL, false);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003244 resolved = !is_zero_ether_addr(found->eth_dst);
3245 } else {
3246 err = -ENOENT;
3247 }
3248
3249 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3250
3251 if (!adding)
Jiri Pirkob15edf82016-02-16 15:14:39 +01003252 rocker_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003253
3254 if (err)
3255 return err;
3256
3257	/* "Resolved" means the neigh's ip_addr has been resolved to its MAC. */
3258
3259 if (!resolved)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003260 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003261
3262 return err;
3263}
3264
Scott Feldman6c707942014-11-28 14:34:28 +01003265static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003266 struct switchdev_trans *trans,
Scott Feldman6c707942014-11-28 14:34:28 +01003267 int flags, __be16 vlan_id)
3268{
3269 struct rocker_port *p;
Simon Hormane5054642015-05-25 14:28:36 +09003270 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman6c707942014-11-28 14:34:28 +01003271 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
Scott Feldman04f49fa2015-03-15 23:04:46 -07003272 u32 *group_ids;
Scott Feldman6c707942014-11-28 14:34:28 +01003273 u8 group_count = 0;
Scott Feldman04f49fa2015-03-15 23:04:46 -07003274 int err = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01003275 int i;
3276
Jiri Pirkob15edf82016-02-16 15:14:39 +01003277 group_ids = rocker_kcalloc(trans, flags,
3278 rocker->port_count, sizeof(u32));
Scott Feldman04f49fa2015-03-15 23:04:46 -07003279 if (!group_ids)
3280 return -ENOMEM;
3281
Scott Feldman6c707942014-11-28 14:34:28 +01003282 /* Adjust the flood group for this VLAN. The flood group
3283 * references an L2 interface group for each port in this
3284 * VLAN.
3285 */
3286
3287 for (i = 0; i < rocker->port_count; i++) {
3288 p = rocker->ports[i];
Scott Feldmanbcfd7802015-06-01 11:39:04 -07003289 if (!p)
3290 continue;
Scott Feldman6c707942014-11-28 14:34:28 +01003291 if (!rocker_port_is_bridged(p))
3292 continue;
3293 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3294 group_ids[group_count++] =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003295 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003296 }
3297 }
3298
3299 /* If there are no bridged ports in this VLAN, we're done */
3300 if (group_count == 0)
Scott Feldman04f49fa2015-03-15 23:04:46 -07003301 goto no_ports_in_vlan;
Scott Feldman6c707942014-11-28 14:34:28 +01003302
Jiri Pirko76c6f942015-09-24 10:02:44 +02003303 err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003304 group_count, group_ids, group_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003305 if (err)
3306 netdev_err(rocker_port->dev,
3307 "Error (%d) port VLAN l2 flood group\n", err);
3308
Scott Feldman04f49fa2015-03-15 23:04:46 -07003309no_ports_in_vlan:
Jiri Pirkob15edf82016-02-16 15:14:39 +01003310 rocker_kfree(trans, group_ids);
Scott Feldman6c707942014-11-28 14:34:28 +01003311 return err;
3312}
3313
3314static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003315 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003316 __be16 vlan_id, bool pop_vlan)
Scott Feldman6c707942014-11-28 14:34:28 +01003317{
Simon Hormane5054642015-05-25 14:28:36 +09003318 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman6c707942014-11-28 14:34:28 +01003319 struct rocker_port *p;
3320 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003321 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003322 int ref = 0;
3323 int err;
3324 int i;
3325
3326 /* An L2 interface group for this port in this VLAN, but
3327 * only when port STP state is LEARNING|FORWARDING.
3328 */
3329
3330 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3331 rocker_port->stp_state == BR_STATE_FORWARDING) {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003332 out_pport = rocker_port->pport;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003333 err = rocker_group_l2_interface(rocker_port, trans, flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003334 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003335 if (err) {
3336 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003337 "Error (%d) port VLAN l2 group for pport %d\n",
3338 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003339 return err;
3340 }
3341 }
3342
3343	/* An L2 interface group for this VLAN to the CPU port.
3344	 * Added when the first port joins this VLAN and destroyed
3345	 * when the last port leaves it.
3346	 */
3347
3348 for (i = 0; i < rocker->port_count; i++) {
3349 p = rocker->ports[i];
Scott Feldmanbcfd7802015-06-01 11:39:04 -07003350 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
Scott Feldman6c707942014-11-28 14:34:28 +01003351 ref++;
3352 }
3353
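	/* Only touch the CPU-port group on the first join (adding and
	 * ref == 1, this port's bit being set already) or the last
	 * leave (removing and ref == 0).
	 */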
3354 if ((!adding || ref != 1) && (adding || ref != 0))
3355 return 0;
3356
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003357 out_pport = 0;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003358 err = rocker_group_l2_interface(rocker_port, trans, flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003359 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003360 if (err) {
3361 netdev_err(rocker_port->dev,
3362 "Error (%d) port VLAN l2 group for CPU port\n", err);
3363 return err;
3364 }
3365
3366 return 0;
3367}
3368
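/* Control traffic classes and how each one is trapped: via an ACL
 * entry, the bridging table's default (flood) entry, or a
 * termination-MAC entry, optionally copying the pkt to the CPU.
 */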
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003369static struct rocker_ctrl {
3370 const u8 *eth_dst;
3371 const u8 *eth_dst_mask;
Jiri Pirko11e6c652014-12-03 14:14:54 +01003372 __be16 eth_type;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003373 bool acl;
3374 bool bridge;
3375 bool term;
3376 bool copy_to_cpu;
3377} rocker_ctrls[] = {
3378 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3379 /* pass link local multicast pkts up to CPU for filtering */
3380 .eth_dst = ll_mac,
3381 .eth_dst_mask = ll_mask,
3382 .acl = true,
3383 },
3384 [ROCKER_CTRL_LOCAL_ARP] = {
3385 /* pass local ARP pkts up to CPU */
3386 .eth_dst = zero_mac,
3387 .eth_dst_mask = zero_mac,
3388 .eth_type = htons(ETH_P_ARP),
3389 .acl = true,
3390 },
3391 [ROCKER_CTRL_IPV4_MCAST] = {
3392 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3393 .eth_dst = ipv4_mcast,
3394 .eth_dst_mask = ipv4_mask,
3395 .eth_type = htons(ETH_P_IP),
3396 .term = true,
3397 .copy_to_cpu = true,
3398 },
3399 [ROCKER_CTRL_IPV6_MCAST] = {
3400 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3401 .eth_dst = ipv6_mcast,
3402 .eth_dst_mask = ipv6_mask,
3403 .eth_type = htons(ETH_P_IPV6),
3404 .term = true,
3405 .copy_to_cpu = true,
3406 },
3407 [ROCKER_CTRL_DFLT_BRIDGING] = {
3408 /* flood any pkts on vlan */
3409 .bridge = true,
3410 .copy_to_cpu = true,
3411 },
Simon Horman82549732015-07-16 10:39:14 +09003412 [ROCKER_CTRL_DFLT_OVS] = {
3413 /* pass all pkts up to CPU */
3414 .eth_dst = zero_mac,
3415 .eth_dst_mask = zero_mac,
3416 .acl = true,
3417 },
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003418};
3419
3420static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003421 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003422 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003423{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003424 u32 in_pport = rocker_port->pport;
3425 u32 in_pport_mask = 0xffffffff;
3426 u32 out_pport = 0;
Simon Hormane5054642015-05-25 14:28:36 +09003427 const u8 *eth_src = NULL;
3428 const u8 *eth_src_mask = NULL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003429 __be16 vlan_id_mask = htons(0xffff);
3430 u8 ip_proto = 0;
3431 u8 ip_proto_mask = 0;
3432 u8 ip_tos = 0;
3433 u8 ip_tos_mask = 0;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003434 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003435 int err;
3436
Jiri Pirko76c6f942015-09-24 10:02:44 +02003437 err = rocker_flow_tbl_acl(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003438 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003439 eth_src, eth_src_mask,
3440 ctrl->eth_dst, ctrl->eth_dst_mask,
3441 ctrl->eth_type,
3442 vlan_id, vlan_id_mask,
3443 ip_proto, ip_proto_mask,
3444 ip_tos, ip_tos_mask,
3445 group_id);
3446
3447 if (err)
3448 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3449
3450 return err;
3451}
3452
Scott Feldman6c707942014-11-28 14:34:28 +01003453static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003454 struct switchdev_trans *trans,
3455 int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003456 const struct rocker_ctrl *ctrl,
Scott Feldman6c707942014-11-28 14:34:28 +01003457 __be16 vlan_id)
3458{
3459 enum rocker_of_dpa_table_id goto_tbl =
3460 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3461 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3462 u32 tunnel_id = 0;
3463 int err;
3464
3465 if (!rocker_port_is_bridged(rocker_port))
3466 return 0;
3467
Jiri Pirko76c6f942015-09-24 10:02:44 +02003468 err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003469 ctrl->eth_dst, ctrl->eth_dst_mask,
3470 vlan_id, tunnel_id,
3471 goto_tbl, group_id, ctrl->copy_to_cpu);
3472
3473 if (err)
3474 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3475
3476 return err;
3477}
3478
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003479static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003480 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003481 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003482{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003483 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003484 __be16 vlan_id_mask = htons(0xffff);
3485 int err;
3486
3487 if (ntohs(vlan_id) == 0)
3488 vlan_id = rocker_port->internal_vlan_id;
3489
Jiri Pirko76c6f942015-09-24 10:02:44 +02003490 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003491 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003492 ctrl->eth_type, ctrl->eth_dst,
3493 ctrl->eth_dst_mask, vlan_id,
3494 vlan_id_mask, ctrl->copy_to_cpu,
3495 flags);
3496
3497 if (err)
3498 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3499
3500 return err;
3501}
3502
Scott Feldmanc4f20322015-05-10 09:47:50 -07003503static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003504 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003505 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003506{
3507 if (ctrl->acl)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003508 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003509 ctrl, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003510 if (ctrl->bridge)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003511 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003512 ctrl, vlan_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003513
3514 if (ctrl->term)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003515 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003516 ctrl, vlan_id);
3517
3518 return -EOPNOTSUPP;
3519}
3520
3521static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003522 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003523 __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003524{
3525 int err = 0;
3526 int i;
3527
3528 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3529 if (rocker_port->ctrls[i]) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003530 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003531 &rocker_ctrls[i], vlan_id);
3532 if (err)
3533 return err;
3534 }
3535 }
3536
3537 return err;
3538}
3539
Scott Feldmanc4f20322015-05-10 09:47:50 -07003540static int rocker_port_ctrl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003541 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003542 const struct rocker_ctrl *ctrl)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003543{
3544 u16 vid;
3545 int err = 0;
3546
3547 for (vid = 1; vid < VLAN_N_VID; vid++) {
3548 if (!test_bit(vid, rocker_port->vlan_bitmap))
3549 continue;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003550 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003551 ctrl, htons(vid));
3552 if (err)
3553 break;
3554 }
3555
3556 return err;
3557}
3558
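/* Add or remove vid on a port: install the ctrl traps, the L2
 * interface groups, the VLAN flood group, and finally the VLAN
 * table entry mapping vid to its internal VLAN. The vlan_bitmap
 * bit is flipped up front and reverted at the end of the prepare
 * phase so that the commit phase can redo the whole operation.
 */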
Scott Feldmanc4f20322015-05-10 09:47:50 -07003559static int rocker_port_vlan(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003560 struct switchdev_trans *trans, int flags, u16 vid)
Scott Feldman6c707942014-11-28 14:34:28 +01003561{
3562 enum rocker_of_dpa_table_id goto_tbl =
3563 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003564 u32 in_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003565 __be16 vlan_id = htons(vid);
3566 __be16 vlan_id_mask = htons(0xffff);
3567 __be16 internal_vlan_id;
3568 bool untagged;
3569 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3570 int err;
3571
3572 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3573
Scott Feldman9228ad22015-05-10 09:47:54 -07003574 if (adding && test_bit(ntohs(internal_vlan_id),
3575 rocker_port->vlan_bitmap))
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01003576 return 0; /* already added */
Scott Feldman9228ad22015-05-10 09:47:54 -07003577 else if (!adding && !test_bit(ntohs(internal_vlan_id),
3578 rocker_port->vlan_bitmap))
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01003579 return 0; /* already removed */
Scott Feldman6c707942014-11-28 14:34:28 +01003580
Scott Feldman9228ad22015-05-10 09:47:54 -07003581 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3582
Scott Feldman6c707942014-11-28 14:34:28 +01003583 if (adding) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003584 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003585 internal_vlan_id);
3586 if (err) {
3587 netdev_err(rocker_port->dev,
3588 "Error (%d) port ctrl vlan add\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003589 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003590 }
3591 }
3592
Jiri Pirko76c6f942015-09-24 10:02:44 +02003593 err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003594 internal_vlan_id, untagged);
3595 if (err) {
3596 netdev_err(rocker_port->dev,
3597 "Error (%d) port VLAN l2 groups\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003598 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003599 }
3600
Jiri Pirko76c6f942015-09-24 10:02:44 +02003601 err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003602 internal_vlan_id);
3603 if (err) {
3604 netdev_err(rocker_port->dev,
3605 "Error (%d) port VLAN l2 flood group\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003606 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003607 }
3608
Jiri Pirko76c6f942015-09-24 10:02:44 +02003609 err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003610 in_pport, vlan_id, vlan_id_mask,
Scott Feldman6c707942014-11-28 14:34:28 +01003611 goto_tbl, untagged, internal_vlan_id);
3612 if (err)
3613 netdev_err(rocker_port->dev,
3614 "Error (%d) port VLAN table\n", err);
3615
Scott Feldman9228ad22015-05-10 09:47:54 -07003616err_out:
Jiri Pirko76c6f942015-09-24 10:02:44 +02003617 if (switchdev_trans_ph_prepare(trans))
Scott Feldman9228ad22015-05-10 09:47:54 -07003618 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3619
Scott Feldman6c707942014-11-28 14:34:28 +01003620 return err;
3621}
3622
Scott Feldmanc4f20322015-05-10 09:47:50 -07003623static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003624 struct switchdev_trans *trans, int flags)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003625{
3626 enum rocker_of_dpa_table_id goto_tbl;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003627 u32 in_pport;
3628 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003629 int err;
3630
3631 /* Normal Ethernet Frames. Matches pkts from any local physical
3632 * ports. Goto VLAN tbl.
3633 */
3634
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003635 in_pport = 0;
3636 in_pport_mask = 0xffff0000;
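	/* in_pport 0 with mask 0xffff0000 wildcards the low 16 bits,
	 * so a single entry covers every physical pport.
	 */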
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003637 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3638
Jiri Pirko76c6f942015-09-24 10:02:44 +02003639 err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003640 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003641 goto_tbl);
3642 if (err)
3643 netdev_err(rocker_port->dev,
3644 "Error (%d) ingress port table entry\n", err);
3645
3646 return err;
3647}
3648
Scott Feldman6c707942014-11-28 14:34:28 +01003649struct rocker_fdb_learn_work {
3650 struct work_struct work;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003651 struct rocker_port *rocker_port;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003652 struct switchdev_trans *trans;
Scott Feldman6c707942014-11-28 14:34:28 +01003653 int flags;
3654 u8 addr[ETH_ALEN];
3655 u16 vid;
3656};
3657
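/* Learned-FDB notifications to the bridge are deferred to a work
 * item: callers may be atomic (e.g. holding the FDB spinlock) and
 * call_switchdev_notifiers() is called under rtnl_lock.
 */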
3658static void rocker_port_fdb_learn_work(struct work_struct *work)
3659{
Simon Hormane5054642015-05-25 14:28:36 +09003660 const struct rocker_fdb_learn_work *lw =
Scott Feldman6c707942014-11-28 14:34:28 +01003661 container_of(work, struct rocker_fdb_learn_work, work);
3662 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3663 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003664 struct switchdev_notifier_fdb_info info;
Jiri Pirko3aeb6612015-01-15 23:49:37 +01003665
3666 info.addr = lw->addr;
3667 info.vid = lw->vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003668
Ido Schimmel4f2c6ae2016-01-27 15:16:43 +01003669 rtnl_lock();
Thomas Graf51ace882014-11-28 14:34:32 +01003670 if (learned && removing)
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003671 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003672 lw->rocker_port->dev, &info.info);
Thomas Graf51ace882014-11-28 14:34:32 +01003673 else if (learned && !removing)
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003674 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003675 lw->rocker_port->dev, &info.info);
Ido Schimmel4f2c6ae2016-01-27 15:16:43 +01003676 rtnl_unlock();
Scott Feldman6c707942014-11-28 14:34:28 +01003677
Jiri Pirkob15edf82016-02-16 15:14:39 +01003678 rocker_kfree(lw->trans, work);
Scott Feldman6c707942014-11-28 14:34:28 +01003679}
3680
3681static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003682 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003683 const u8 *addr, __be16 vlan_id)
Scott Feldman6c707942014-11-28 14:34:28 +01003684{
3685 struct rocker_fdb_learn_work *lw;
3686 enum rocker_of_dpa_table_id goto_tbl =
3687 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003688 u32 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003689 u32 tunnel_id = 0;
3690 u32 group_id = ROCKER_GROUP_NONE;
Scott Feldman5111f802014-11-28 14:34:30 +01003691 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
Scott Feldman6c707942014-11-28 14:34:28 +01003692 bool copy_to_cpu = false;
3693 int err;
3694
3695 if (rocker_port_is_bridged(rocker_port))
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003696 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003697
3698 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003699 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003700 NULL, vlan_id, tunnel_id, goto_tbl,
Scott Feldman6c707942014-11-28 14:34:28 +01003701 group_id, copy_to_cpu);
3702 if (err)
3703 return err;
3704 }
3705
Scott Feldman5111f802014-11-28 14:34:30 +01003706 if (!syncing)
3707 return 0;
3708
Scott Feldman6c707942014-11-28 14:34:28 +01003709 if (!rocker_port_is_bridged(rocker_port))
3710 return 0;
3711
Jiri Pirkob15edf82016-02-16 15:14:39 +01003712 lw = rocker_kzalloc(trans, flags, sizeof(*lw));
Scott Feldman6c707942014-11-28 14:34:28 +01003713 if (!lw)
3714 return -ENOMEM;
3715
3716 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3717
Scott Feldmanc4f20322015-05-10 09:47:50 -07003718 lw->rocker_port = rocker_port;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003719 lw->trans = trans;
Scott Feldman6c707942014-11-28 14:34:28 +01003720 lw->flags = flags;
3721 ether_addr_copy(lw->addr, addr);
3722 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3723
Jiri Pirko76c6f942015-09-24 10:02:44 +02003724 if (switchdev_trans_ph_prepare(trans))
Jiri Pirkob15edf82016-02-16 15:14:39 +01003725 rocker_kfree(trans, lw);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003726 else
3727 schedule_work(&lw->work);
Scott Feldman6c707942014-11-28 14:34:28 +01003728
3729 return 0;
3730}
3731
3732static struct rocker_fdb_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09003733rocker_fdb_tbl_find(const struct rocker *rocker,
3734 const struct rocker_fdb_tbl_entry *match)
Scott Feldman6c707942014-11-28 14:34:28 +01003735{
3736 struct rocker_fdb_tbl_entry *found;
3737
3738 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3739 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3740 return found;
3741
3742 return NULL;
3743}
3744
3745static int rocker_port_fdb(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003746 struct switchdev_trans *trans,
Scott Feldman6c707942014-11-28 14:34:28 +01003747 const unsigned char *addr,
3748 __be16 vlan_id, int flags)
3749{
3750 struct rocker *rocker = rocker_port->rocker;
3751 struct rocker_fdb_tbl_entry *fdb;
3752 struct rocker_fdb_tbl_entry *found;
3753 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3754 unsigned long lock_flags;
3755
Jiri Pirkob15edf82016-02-16 15:14:39 +01003756 fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
Scott Feldman6c707942014-11-28 14:34:28 +01003757 if (!fdb)
3758 return -ENOMEM;
3759
3760 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
Scott Feldmana471be42015-09-23 08:39:14 -07003761 fdb->touched = jiffies;
Scott Feldman4c660492015-09-23 08:39:15 -07003762 fdb->key.rocker_port = rocker_port;
Scott Feldman6c707942014-11-28 14:34:28 +01003763 ether_addr_copy(fdb->key.addr, addr);
3764 fdb->key.vlan_id = vlan_id;
3765 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3766
3767 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3768
3769 found = rocker_fdb_tbl_find(rocker, fdb);
3770
Scott Feldmana471be42015-09-23 08:39:14 -07003771 if (found) {
3772 found->touched = jiffies;
3773 if (removing) {
Jiri Pirkob15edf82016-02-16 15:14:39 +01003774 rocker_kfree(trans, fdb);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003775 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmana471be42015-09-23 08:39:14 -07003776 hash_del(&found->entry);
3777 }
3778 } else if (!removing) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003779 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmana471be42015-09-23 08:39:14 -07003780 hash_add(rocker->fdb_tbl, &fdb->entry,
3781 fdb->key_crc32);
Scott Feldman6c707942014-11-28 14:34:28 +01003782 }
3783
3784 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3785
3786 /* Check if adding and already exists, or removing and can't find */
3787 if (!found != !removing) {
Jiri Pirkob15edf82016-02-16 15:14:39 +01003788 rocker_kfree(trans, fdb);
Scott Feldman6c707942014-11-28 14:34:28 +01003789 if (!found && removing)
3790 return 0;
3791		/* Refresh the existing entry to update its ageing timer */
3792 flags |= ROCKER_OP_FLAG_REFRESH;
3793 }
3794
Jiri Pirko76c6f942015-09-24 10:02:44 +02003795 return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003796}
3797
Scott Feldmanc4f20322015-05-10 09:47:50 -07003798static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003799 struct switchdev_trans *trans, int flags)
Scott Feldman6c707942014-11-28 14:34:28 +01003800{
3801 struct rocker *rocker = rocker_port->rocker;
3802 struct rocker_fdb_tbl_entry *found;
3803 unsigned long lock_flags;
Scott Feldman6c707942014-11-28 14:34:28 +01003804 struct hlist_node *tmp;
3805 int bkt;
3806 int err = 0;
3807
3808 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3809 rocker_port->stp_state == BR_STATE_FORWARDING)
3810 return 0;
3811
Jiri Pirkod33eeb62015-10-14 19:40:54 +02003812 flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
Scott Feldman179f9a22015-06-12 21:35:46 -07003813
Scott Feldman6c707942014-11-28 14:34:28 +01003814 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3815
3816 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4c660492015-09-23 08:39:15 -07003817 if (found->key.rocker_port != rocker_port)
Scott Feldman6c707942014-11-28 14:34:28 +01003818 continue;
3819 if (!found->learned)
3820 continue;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003821 err = rocker_port_fdb_learn(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003822 found->key.addr,
3823 found->key.vlan_id);
3824 if (err)
3825 goto err_out;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003826 if (!switchdev_trans_ph_prepare(trans))
Simon Horman3098ac32015-05-21 12:40:14 +09003827 hash_del(&found->entry);
Scott Feldman6c707942014-11-28 14:34:28 +01003828 }
3829
3830err_out:
3831 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3832
3833 return err;
3834}
3835
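/* FDB ageing timer: drop learned entries whose last touch is older
 * than the owning port's ageing_time, then re-arm for the earliest
 * remaining expiry, no later than BR_MIN_AGEING_TIME from now.
 */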
Scott Feldman52fe3e22015-09-23 08:39:18 -07003836static void rocker_fdb_cleanup(unsigned long data)
3837{
3838 struct rocker *rocker = (struct rocker *)data;
3839 struct rocker_port *rocker_port;
3840 struct rocker_fdb_tbl_entry *entry;
3841 struct hlist_node *tmp;
3842 unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
3843 unsigned long expires;
3844 unsigned long lock_flags;
3845 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3846 ROCKER_OP_FLAG_LEARNED;
3847 int bkt;
3848
3849 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3850
3851 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3852 if (!entry->learned)
3853 continue;
3854 rocker_port = entry->key.rocker_port;
3855 expires = entry->touched + rocker_port->ageing_time;
3856 if (time_before_eq(expires, jiffies)) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003857 rocker_port_fdb_learn(rocker_port, NULL,
Scott Feldman52fe3e22015-09-23 08:39:18 -07003858 flags, entry->key.addr,
3859 entry->key.vlan_id);
3860 hash_del(&entry->entry);
3861 } else if (time_before(expires, next_timer)) {
3862 next_timer = expires;
3863 }
3864 }
3865
3866 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3867
3868 mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
3869}
3870
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003871static int rocker_port_router_mac(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003872 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003873 __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003874{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003875 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003876 __be16 eth_type;
3877 const u8 *dst_mac_mask = ff_mac;
3878 __be16 vlan_id_mask = htons(0xffff);
3879 bool copy_to_cpu = false;
3880 int err;
3881
3882 if (ntohs(vlan_id) == 0)
3883 vlan_id = rocker_port->internal_vlan_id;
3884
3885 eth_type = htons(ETH_P_IP);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003886 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003887 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003888 eth_type, rocker_port->dev->dev_addr,
3889 dst_mac_mask, vlan_id, vlan_id_mask,
3890 copy_to_cpu, flags);
3891 if (err)
3892 return err;
3893
3894 eth_type = htons(ETH_P_IPV6);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003895 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003896 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003897 eth_type, rocker_port->dev->dev_addr,
3898 dst_mac_mask, vlan_id, vlan_id_mask,
3899 copy_to_cpu, flags);
3900
3901 return err;
3902}
3903
Scott Feldmanc4f20322015-05-10 09:47:50 -07003904static int rocker_port_fwding(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003905 struct switchdev_trans *trans, int flags)
Scott Feldman6c707942014-11-28 14:34:28 +01003906{
3907 bool pop_vlan;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003908 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003909 __be16 vlan_id;
3910 u16 vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003911 int err;
3912
3913 /* Port will be forwarding-enabled if its STP state is LEARNING
3914 * or FORWARDING. Traffic from CPU can still egress, regardless of
3915 * port STP state. Use L2 interface group on port VLANs as a way
3916 * to toggle port forwarding: if forwarding is disabled, L2
3917 * interface group will not exist.
3918 */
3919
3920 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3921 rocker_port->stp_state != BR_STATE_FORWARDING)
3922 flags |= ROCKER_OP_FLAG_REMOVE;
3923
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003924 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003925 for (vid = 1; vid < VLAN_N_VID; vid++) {
3926 if (!test_bit(vid, rocker_port->vlan_bitmap))
3927 continue;
3928 vlan_id = htons(vid);
3929 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003930 err = rocker_group_l2_interface(rocker_port, trans, flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003931 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003932 if (err) {
3933 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003934 "Error (%d) port VLAN l2 group for pport %d\n",
3935 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003936 return err;
3937 }
3938 }
3939
3940 return 0;
3941}
3942
Scott Feldmanc4f20322015-05-10 09:47:50 -07003943static int rocker_port_stp_update(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003944 struct switchdev_trans *trans, int flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07003945 u8 state)
Scott Feldman6c707942014-11-28 14:34:28 +01003946{
3947 bool want[ROCKER_CTRL_MAX] = { 0, };
Scott Feldmanc4f20322015-05-10 09:47:50 -07003948 bool prev_ctrls[ROCKER_CTRL_MAX];
Jiri Pirko76c6f942015-09-24 10:02:44 +02003949 u8 uninitialized_var(prev_state);
Scott Feldman6c707942014-11-28 14:34:28 +01003950 int err;
3951 int i;
3952
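	/* Save enough state during the prepare phase to roll the port
	 * back at err_out: preparing must leave no visible side effects.
	 */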
Jiri Pirko76c6f942015-09-24 10:02:44 +02003953 if (switchdev_trans_ph_prepare(trans)) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003954 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3955 prev_state = rocker_port->stp_state;
3956 }
3957
Scott Feldman6c707942014-11-28 14:34:28 +01003958 if (rocker_port->stp_state == state)
3959 return 0;
3960
3961 rocker_port->stp_state = state;
3962
3963 switch (state) {
3964 case BR_STATE_DISABLED:
3965 /* port is completely disabled */
3966 break;
3967 case BR_STATE_LISTENING:
3968 case BR_STATE_BLOCKING:
3969 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3970 break;
3971 case BR_STATE_LEARNING:
3972 case BR_STATE_FORWARDING:
Simon Horman82549732015-07-16 10:39:14 +09003973 if (!rocker_port_is_ovsed(rocker_port))
3974 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
Scott Feldman6c707942014-11-28 14:34:28 +01003975 want[ROCKER_CTRL_IPV4_MCAST] = true;
3976 want[ROCKER_CTRL_IPV6_MCAST] = true;
3977 if (rocker_port_is_bridged(rocker_port))
3978 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
Simon Horman82549732015-07-16 10:39:14 +09003979 else if (rocker_port_is_ovsed(rocker_port))
3980 want[ROCKER_CTRL_DFLT_OVS] = true;
Scott Feldman6c707942014-11-28 14:34:28 +01003981 else
3982 want[ROCKER_CTRL_LOCAL_ARP] = true;
3983 break;
3984 }
3985
3986 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3987 if (want[i] != rocker_port->ctrls[i]) {
Scott Feldman179f9a22015-06-12 21:35:46 -07003988 int ctrl_flags = flags |
3989 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003990 err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003991 &rocker_ctrls[i]);
3992 if (err)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003993 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003994 rocker_port->ctrls[i] = want[i];
3995 }
3996 }
3997
Jiri Pirko76c6f942015-09-24 10:02:44 +02003998 err = rocker_port_fdb_flush(rocker_port, trans, flags);
Scott Feldman6c707942014-11-28 14:34:28 +01003999 if (err)
Scott Feldmanc4f20322015-05-10 09:47:50 -07004000 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01004001
Jiri Pirko76c6f942015-09-24 10:02:44 +02004002 err = rocker_port_fwding(rocker_port, trans, flags);
Scott Feldmanc4f20322015-05-10 09:47:50 -07004003
4004err_out:
Jiri Pirko76c6f942015-09-24 10:02:44 +02004005 if (switchdev_trans_ph_prepare(trans)) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07004006 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
4007 rocker_port->stp_state = prev_state;
4008 }
4009
4010 return err;
Scott Feldman6c707942014-11-28 14:34:28 +01004011}
4012
Scott Feldmanc4f20322015-05-10 09:47:50 -07004013static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004014 struct switchdev_trans *trans, int flags)
Scott Feldmane47172a2015-02-25 20:15:38 -08004015{
4016 if (rocker_port_is_bridged(rocker_port))
4017 /* bridge STP will enable port */
4018 return 0;
4019
4020 /* port is not bridged, so simulate going to FORWARDING state */
Jiri Pirko76c6f942015-09-24 10:02:44 +02004021 return rocker_port_stp_update(rocker_port, trans, flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07004022 BR_STATE_FORWARDING);
Scott Feldmane47172a2015-02-25 20:15:38 -08004023}
4024
Scott Feldmanc4f20322015-05-10 09:47:50 -07004025static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004026 struct switchdev_trans *trans, int flags)
Scott Feldmane47172a2015-02-25 20:15:38 -08004027{
4028 if (rocker_port_is_bridged(rocker_port))
4029 /* bridge STP will disable port */
4030 return 0;
4031
4032 /* port is not bridged, so simulate going to DISABLED state */
Jiri Pirko76c6f942015-09-24 10:02:44 +02004033 return rocker_port_stp_update(rocker_port, trans, flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07004034 BR_STATE_DISABLED);
Scott Feldmane47172a2015-02-25 20:15:38 -08004035}
4036
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004037static struct rocker_internal_vlan_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09004038rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004039{
4040 struct rocker_internal_vlan_tbl_entry *found;
4041
4042 hash_for_each_possible(rocker->internal_vlan_tbl, found,
4043 entry, ifindex) {
4044 if (found->ifindex == ifindex)
4045 return found;
4046 }
4047
4048 return NULL;
4049}
4050
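/* Allocate (or ref) the internal VLAN ID representing untagged
 * traffic for an ifindex. IDs come from a bitmap starting at
 * ROCKER_INTERNAL_VLAN_ID_BASE and are shared by users with the
 * same ifindex key (e.g. ports enslaved to the same bridge).
 */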
4051static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
4052 int ifindex)
4053{
4054 struct rocker *rocker = rocker_port->rocker;
4055 struct rocker_internal_vlan_tbl_entry *entry;
4056 struct rocker_internal_vlan_tbl_entry *found;
4057 unsigned long lock_flags;
4058 int i;
4059
Simon Hormandf6a2062015-05-21 12:40:17 +09004060 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004061 if (!entry)
4062 return 0;
4063
4064 entry->ifindex = ifindex;
4065
4066 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4067
4068 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4069 if (found) {
Simon Hormandf6a2062015-05-21 12:40:17 +09004070 kfree(entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004071 goto found;
4072 }
4073
4074 found = entry;
4075 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
4076
4077 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
4078 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
4079 continue;
4080 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
4081 goto found;
4082 }
4083
4084 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
4085
4086found:
4087 found->ref_count++;
4088 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4089
4090 return found->vlan_id;
4091}
4092
Simon Hormane5054642015-05-25 14:28:36 +09004093static void
4094rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
4095 int ifindex)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004096{
4097 struct rocker *rocker = rocker_port->rocker;
4098 struct rocker_internal_vlan_tbl_entry *found;
4099 unsigned long lock_flags;
4100 unsigned long bit;
4101
4102 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4103
4104 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4105 if (!found) {
4106 netdev_err(rocker_port->dev,
4107 "ifindex (%d) not found in internal VLAN tbl\n",
4108 ifindex);
4109 goto not_found;
4110 }
4111
4112 if (--found->ref_count <= 0) {
4113 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
4114 clear_bit(bit, rocker->internal_vlan_bitmap);
4115 hash_del(&found->entry);
Simon Hormandf6a2062015-05-21 12:40:17 +09004116 kfree(found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004117 }
4118
4119not_found:
4120 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4121}

static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
                                struct switchdev_trans *trans, __be32 dst,
                                int dst_len, const struct fib_info *fi,
                                u32 tb_id, int flags)
{
        const struct fib_nh *nh;
        __be16 eth_type = htons(ETH_P_IP);
        __be32 dst_mask = inet_make_mask(dst_len);
        __be16 internal_vlan_id = rocker_port->internal_vlan_id;
        u32 priority = fi->fib_priority;
        enum rocker_of_dpa_table_id goto_tbl =
                ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
        u32 group_id;
        bool nh_on_port;
        bool has_gw;
        u32 index;
        int err;

        /* XXX support ECMP */

        nh = fi->fib_nh;
        nh_on_port = (fi->fib_dev == rocker_port->dev);
        has_gw = !!nh->nh_gw;

        if (has_gw && nh_on_port) {
                err = rocker_port_ipv4_nh(rocker_port, trans, flags,
                                          nh->nh_gw, &index);
                if (err)
                        return err;

                group_id = ROCKER_GROUP_L3_UNICAST(index);
        } else {
                /* Send to CPU for processing */
                group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
        }

        err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
                                             dst_mask, priority, goto_tbl,
                                             group_id, flags);
        if (err)
                netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
                           err, &dst);

        return err;
}
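
/* A route is offloaded in full only when its (single) nexthop has a
 * gateway and resolves out this port: the gateway goes through
 * rocker_port_ipv4_nh() and the flow entry points at an L3 unicast
 * group. Everything else (connected routes, nexthops on other devices)
 * is steered to an L2 interface group on the port's internal VLAN,
 * i.e. trapped to the CPU so the kernel stack forwards it. ECMP is not
 * supported yet; only fib_nh, the first nexthop, is examined.
 */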

/*****************
 * Net device ops
 *****************/

static int rocker_port_open(struct net_device *dev)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        int err;

        err = rocker_port_dma_rings_init(rocker_port);
        if (err)
                return err;

        err = request_irq(rocker_msix_tx_vector(rocker_port),
                          rocker_tx_irq_handler, 0,
                          rocker_driver_name, rocker_port);
        if (err) {
                netdev_err(rocker_port->dev, "cannot assign tx irq\n");
                goto err_request_tx_irq;
        }

        err = request_irq(rocker_msix_rx_vector(rocker_port),
                          rocker_rx_irq_handler, 0,
                          rocker_driver_name, rocker_port);
        if (err) {
                netdev_err(rocker_port->dev, "cannot assign rx irq\n");
                goto err_request_rx_irq;
        }

        err = rocker_world_port_open(rocker_port);
        if (err) {
                netdev_err(rocker_port->dev, "cannot open port in world\n");
                goto err_world_port_open;
        }

        err = rocker_port_fwd_enable(rocker_port, NULL, 0);
        if (err)
                goto err_fwd_enable;

        napi_enable(&rocker_port->napi_tx);
        napi_enable(&rocker_port->napi_rx);
        if (!dev->proto_down)
                rocker_port_set_enable(rocker_port, true);
        netif_start_queue(dev);
        return 0;

err_fwd_enable:
err_world_port_open:
        free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
        free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
        rocker_port_dma_rings_fini(rocker_port);
        return err;
}
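
/* Bring-up order matters: the DMA rings must exist before the tx/rx
 * IRQs that service them are requested, the world (the OF-DPA pipeline
 * by default) must accept the port before forwarding is enabled, and
 * the error labels unwind in exactly the reverse order. Also note the
 * proto_down interaction: the queue is started either way, but the
 * port is left disabled in hardware while user space holds proto_down.
 */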

static int rocker_port_stop(struct net_device *dev)
{
        struct rocker_port *rocker_port = netdev_priv(dev);

        netif_stop_queue(dev);
        rocker_port_set_enable(rocker_port, false);
        napi_disable(&rocker_port->napi_rx);
        napi_disable(&rocker_port->napi_tx);
        rocker_world_port_stop(rocker_port);
        rocker_port_fwd_disable(rocker_port, NULL,
                                ROCKER_OP_FLAG_NOWAIT);
        free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
        free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
        rocker_port_dma_rings_fini(rocker_port);

        return 0;
}

static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
                                       const struct rocker_desc_info *desc_info)
{
        const struct rocker *rocker = rocker_port->rocker;
        struct pci_dev *pdev = rocker->pdev;
        const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
        struct rocker_tlv *attr;
        int rem;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
        if (!attrs[ROCKER_TLV_TX_FRAGS])
                return;
        rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
                const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
                dma_addr_t dma_handle;
                size_t len;

                if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
                        continue;
                rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
                                        attr);
                if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
                    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
                        continue;
                dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
                len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
                pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
        }
}

static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
                                       struct rocker_desc_info *desc_info,
                                       char *buf, size_t buf_len)
{
        const struct rocker *rocker = rocker_port->rocker;
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;
        struct rocker_tlv *frag;

        dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
        if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
                if (net_ratelimit())
                        netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
                return -EIO;
        }
        frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
        if (!frag)
                goto unmap_frag;
        if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
                               dma_handle))
                goto nest_cancel;
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
                               buf_len))
                goto nest_cancel;
        rocker_tlv_nest_end(desc_info, frag);
        return 0;

nest_cancel:
        rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
        pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
        return -EMSGSIZE;
}
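
/* Tx descriptors carry a nested TLV tree instead of a fixed layout.
 * Roughly, with attribute numbering per rocker_hw.h:
 *
 *      ROCKER_TLV_TX_FRAGS
 *          ROCKER_TLV_TX_FRAG
 *              ROCKER_TLV_TX_FRAG_ATTR_ADDR  (u64 DMA address)
 *              ROCKER_TLV_TX_FRAG_ATTR_LEN   (u16 length)
 *          ROCKER_TLV_TX_FRAG
 *              ...
 *
 * rocker_tx_desc_frags_unmap() walks the same tree on completion to
 * undo the pci_map_single() done here, and a failed put cancels the
 * half-built nest so the descriptor stays parseable.
 */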

static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_desc_info *desc_info;
        struct rocker_tlv *frags;
        int i;
        int err;

        desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
        if (unlikely(!desc_info)) {
                if (net_ratelimit())
                        netdev_err(dev, "tx ring full when queue awake\n");
                return NETDEV_TX_BUSY;
        }

        rocker_desc_cookie_ptr_set(desc_info, skb);

        frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
        if (!frags)
                goto out;
        err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
                                          skb->data, skb_headlen(skb));
        if (err)
                goto nest_cancel;
        if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
                err = skb_linearize(skb);
                if (err)
                        goto unmap_frags;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
                                                  skb_frag_address(frag),
                                                  skb_frag_size(frag));
                if (err)
                        goto unmap_frags;
        }
        rocker_tlv_nest_end(desc_info, frags);

        rocker_desc_gen_clear(desc_info);
        rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

        desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
        if (!desc_info)
                netif_stop_queue(dev);

        return NETDEV_TX_OK;

unmap_frags:
        rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
        rocker_tlv_nest_cancel(desc_info, frags);
out:
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;

        return NETDEV_TX_OK;
}
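
/* An skb with more page fragments than one descriptor can describe
 * (ROCKER_TX_FRAGS_MAX) is linearized into a single buffer. The head
 * fragment has already been queued at that point, which is why a
 * linearize failure unwinds through unmap_frags rather than
 * nest_cancel. The queue is stopped as soon as the ring has no next
 * head descriptor and is woken again from the tx-completion NAPI poll.
 */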

static int rocker_port_set_mac_address(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;
        struct rocker_port *rocker_port = netdev_priv(dev);
        int err;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
        if (err)
                return err;
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        return 0;
}

static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        int running = netif_running(dev);
        int err;

#define ROCKER_PORT_MIN_MTU     68
#define ROCKER_PORT_MAX_MTU     9000

        if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
                return -EINVAL;

        if (running)
                rocker_port_stop(dev);

        netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
        dev->mtu = new_mtu;

        err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
        if (err)
                return err;

        if (running)
                err = rocker_port_open(dev);

        return err;
}
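
/* The stop/open cycle around the MTU command is presumably what lets
 * the rx ring be rebuilt with buffers sized for the new frame size.
 * If the settings command itself fails on a running port, the function
 * returns with the port still down, which is the conservative failure
 * mode.
 */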

static int rocker_port_get_phys_port_name(struct net_device *dev,
                                          char *buf, size_t len)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        struct port_name name = { .buf = buf, .len = len };
        int err;

        err = rocker_cmd_exec(rocker_port, false,
                              rocker_cmd_get_port_settings_prep, NULL,
                              rocker_cmd_get_port_settings_phys_name_proc,
                              &name);

        return err ? -EOPNOTSUPP : 0;
}

static int rocker_port_change_proto_down(struct net_device *dev,
                                         bool proto_down)
{
        struct rocker_port *rocker_port = netdev_priv(dev);

        if (rocker_port->dev->flags & IFF_UP)
                rocker_port_set_enable(rocker_port, !proto_down);
        rocker_port->dev->proto_down = proto_down;
        return 0;
}

static void rocker_port_neigh_destroy(struct neighbour *n)
{
        struct rocker_port *rocker_port = netdev_priv(n->dev);
        int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
        __be32 ip_addr = *(__be32 *)n->primary_key;
        int err;

        rocker_port_ipv4_neigh(rocker_port, NULL,
                               flags, ip_addr, n->ha);
        err = rocker_world_port_neigh_destroy(rocker_port, n);
        if (err)
                netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
                            err);
}

static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_open                       = rocker_port_open,
        .ndo_stop                       = rocker_port_stop,
        .ndo_start_xmit                 = rocker_port_xmit,
        .ndo_set_mac_address            = rocker_port_set_mac_address,
        .ndo_change_mtu                 = rocker_port_change_mtu,
        .ndo_bridge_getlink             = switchdev_port_bridge_getlink,
        .ndo_bridge_setlink             = switchdev_port_bridge_setlink,
        .ndo_bridge_dellink             = switchdev_port_bridge_dellink,
        .ndo_fdb_add                    = switchdev_port_fdb_add,
        .ndo_fdb_del                    = switchdev_port_fdb_del,
        .ndo_fdb_dump                   = switchdev_port_fdb_dump,
        .ndo_get_phys_port_name         = rocker_port_get_phys_port_name,
        .ndo_change_proto_down          = rocker_port_change_proto_down,
        .ndo_neigh_destroy              = rocker_port_neigh_destroy,
};

/********************
 * swdev interface
 ********************/

static int rocker_port_attr_get(struct net_device *dev,
                                struct switchdev_attr *attr)
{
        const struct rocker_port *rocker_port = netdev_priv(dev);
        const struct rocker *rocker = rocker_port->rocker;
        int err = 0;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
                attr->u.ppid.id_len = sizeof(rocker->hw.id);
                memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
                break;
        case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
                attr->u.brport_flags = rocker_port->brport_flags;
                err = rocker_world_port_attr_bridge_flags_get(rocker_port,
                                                              &attr->u.brport_flags);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return err;
}

static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
                                        struct switchdev_trans *trans,
                                        unsigned long brport_flags)
{
        unsigned long orig_flags;
        int err = 0;

        orig_flags = rocker_port->brport_flags;
        rocker_port->brport_flags = brport_flags;
        if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING &&
            !switchdev_trans_ph_prepare(trans))
                err = rocker_port_set_learning(rocker_port,
                                               !!(rocker_port->brport_flags & BR_LEARNING));

        if (switchdev_trans_ph_prepare(trans))
                rocker_port->brport_flags = orig_flags;

        return err;
}
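
/* switchdev attr/obj updates are transactional: a prepare phase, in
 * which resources may be validated and preallocated but hardware must
 * not be touched, and a commit phase that is not allowed to fail.
 * switchdev_trans_ph_prepare() distinguishes the two, which is why the
 * learning change above is only pushed to hardware outside of prepare
 * and the flags are rolled back when this was just the prepare pass.
 */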

static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
                                          struct switchdev_trans *trans,
                                          u32 ageing_time)
{
        if (!switchdev_trans_ph_prepare(trans)) {
                rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
                mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
        }

        return 0;
}

static int rocker_port_attr_set(struct net_device *dev,
                                const struct switchdev_attr *attr,
                                struct switchdev_trans *trans)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        int err = 0;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
                err = rocker_port_stp_update(rocker_port, trans, 0,
                                             attr->u.stp_state);
                if (err)
                        break;
                err = rocker_world_port_attr_stp_state_set(rocker_port,
                                                           attr->u.stp_state,
                                                           trans);
                break;
        case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
                err = rocker_port_brport_flags_set(rocker_port, trans,
                                                   attr->u.brport_flags);
                if (err)
                        break;
                err = rocker_world_port_attr_bridge_flags_set(rocker_port,
                                                              attr->u.brport_flags,
                                                              trans);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
                err = rocker_port_bridge_ageing_time(rocker_port, trans,
                                                     attr->u.ageing_time);
                if (err)
                        break;
                err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
                                                                    attr->u.ageing_time,
                                                                    trans);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static int rocker_port_vlan_add(struct rocker_port *rocker_port,
                                struct switchdev_trans *trans,
                                u16 vid, u16 flags)
{
        int err;

        /* XXX deal with flags for PVID and untagged */

        err = rocker_port_vlan(rocker_port, trans, 0, vid);
        if (err)
                return err;

        err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
        if (err)
                rocker_port_vlan(rocker_port, trans,
                                 ROCKER_OP_FLAG_REMOVE, vid);

        return err;
}

static int rocker_port_vlans_add(struct rocker_port *rocker_port,
                                 struct switchdev_trans *trans,
                                 const struct switchdev_obj_port_vlan *vlan)
{
        u16 vid;
        int err;

        for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
                err = rocker_port_vlan_add(rocker_port, trans,
                                           vid, vlan->flags);
                if (err)
                        return err;
        }

        return 0;
}

static int rocker_port_fdb_add(struct rocker_port *rocker_port,
                               struct switchdev_trans *trans,
                               const struct switchdev_obj_port_fdb *fdb)
{
        __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
        int flags = 0;

        if (!rocker_port_is_bridged(rocker_port))
                return -EINVAL;

        return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}

static int rocker_port_obj_add(struct net_device *dev,
                               const struct switchdev_obj *obj,
                               struct switchdev_trans *trans)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        const struct switchdev_obj_ipv4_fib *fib4;
        int err = 0;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                err = rocker_port_vlans_add(rocker_port, trans,
                                            SWITCHDEV_OBJ_PORT_VLAN(obj));
                if (err)
                        break;
                err = rocker_world_port_obj_vlan_add(rocker_port,
                                                     SWITCHDEV_OBJ_PORT_VLAN(obj),
                                                     trans);
                break;
        case SWITCHDEV_OBJ_ID_IPV4_FIB:
                fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
                err = rocker_port_fib_ipv4(rocker_port, trans,
                                           htonl(fib4->dst), fib4->dst_len,
                                           &fib4->fi, fib4->tb_id, 0);
                if (err)
                        break;
                err = rocker_world_port_obj_fib4_add(rocker_port,
                                                     SWITCHDEV_OBJ_IPV4_FIB(obj),
                                                     trans);
                break;
        case SWITCHDEV_OBJ_ID_PORT_FDB:
                err = rocker_port_fdb_add(rocker_port, trans,
                                          SWITCHDEV_OBJ_PORT_FDB(obj));
                if (err)
                        break;
                err = rocker_world_port_obj_fdb_add(rocker_port,
                                                    SWITCHDEV_OBJ_PORT_FDB(obj),
                                                    trans);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}
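
/* While the driver migrates to the "world" abstraction, every
 * switchdev object is applied twice: first through the legacy OF-DPA
 * paths in this file, then through the rocker_world_port_obj_*()
 * hooks. The world call is skipped whenever the legacy path fails, so
 * the two views cannot diverge on error.
 */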

static int rocker_port_vlan_del(struct rocker_port *rocker_port,
                                u16 vid, u16 flags)
{
        int err;

        err = rocker_port_router_mac(rocker_port, NULL,
                                     ROCKER_OP_FLAG_REMOVE, htons(vid));
        if (err)
                return err;

        return rocker_port_vlan(rocker_port, NULL,
                                ROCKER_OP_FLAG_REMOVE, vid);
}

static int rocker_port_vlans_del(struct rocker_port *rocker_port,
                                 const struct switchdev_obj_port_vlan *vlan)
{
        u16 vid;
        int err;

        for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
                err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
                if (err)
                        return err;
        }

        return 0;
}

static int rocker_port_fdb_del(struct rocker_port *rocker_port,
                               struct switchdev_trans *trans,
                               const struct switchdev_obj_port_fdb *fdb)
{
        __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
        int flags = ROCKER_OP_FLAG_REMOVE;

        if (!rocker_port_is_bridged(rocker_port))
                return -EINVAL;

        return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}

static int rocker_port_obj_del(struct net_device *dev,
                               const struct switchdev_obj *obj)
{
        struct rocker_port *rocker_port = netdev_priv(dev);
        const struct switchdev_obj_ipv4_fib *fib4;
        int err = 0;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                err = rocker_port_vlans_del(rocker_port,
                                            SWITCHDEV_OBJ_PORT_VLAN(obj));
                if (err)
                        break;
                err = rocker_world_port_obj_vlan_del(rocker_port,
                                                     SWITCHDEV_OBJ_PORT_VLAN(obj));
                break;
        case SWITCHDEV_OBJ_ID_IPV4_FIB:
                fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
                err = rocker_port_fib_ipv4(rocker_port, NULL,
                                           htonl(fib4->dst), fib4->dst_len,
                                           &fib4->fi, fib4->tb_id,
                                           ROCKER_OP_FLAG_REMOVE);
                if (err)
                        break;
                err = rocker_world_port_obj_fib4_del(rocker_port,
                                                     SWITCHDEV_OBJ_IPV4_FIB(obj));
                break;
        case SWITCHDEV_OBJ_ID_PORT_FDB:
                err = rocker_port_fdb_del(rocker_port, NULL,
                                          SWITCHDEV_OBJ_PORT_FDB(obj));
                if (err)
                        break;
                err = rocker_world_port_obj_fdb_del(rocker_port,
                                                    SWITCHDEV_OBJ_PORT_FDB(obj));
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
                                struct switchdev_obj_port_fdb *fdb,
                                switchdev_obj_dump_cb_t *cb)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_fdb_tbl_entry *found;
        struct hlist_node *tmp;
        unsigned long lock_flags;
        int bkt;
        int err = 0;

        spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
        hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
                if (found->key.rocker_port != rocker_port)
                        continue;
                ether_addr_copy(fdb->addr, found->key.addr);
                fdb->ndm_state = NUD_REACHABLE;
                fdb->vid = rocker_port_vlan_to_vid(rocker_port,
                                                   found->key.vlan_id);
                err = cb(&fdb->obj);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

        return err;
}
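
/* Dumps reuse the single caller-provided object as a scratch buffer:
 * each matching FDB entry is copied into *fdb and handed to the
 * switchdev dump callback (typically a netlink fill function). The
 * callback runs under fdb_tbl_lock here, so it must not sleep, and a
 * non-zero return aborts the walk.
 */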

static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
                                 struct switchdev_obj_port_vlan *vlan,
                                 switchdev_obj_dump_cb_t *cb)
{
        u16 vid;
        int err = 0;

        for (vid = 1; vid < VLAN_N_VID; vid++) {
                if (!test_bit(vid, rocker_port->vlan_bitmap))
                        continue;
                vlan->flags = 0;
                if (rocker_vlan_id_is_internal(htons(vid)))
                        vlan->flags |= BRIDGE_VLAN_INFO_PVID;
                vlan->vid_begin = vid;
                vlan->vid_end = vid;
                err = cb(&vlan->obj);
                if (err)
                        break;
        }

        return err;
}

static int rocker_port_obj_dump(struct net_device *dev,
                                struct switchdev_obj *obj,
                                switchdev_obj_dump_cb_t *cb)
{
        const struct rocker_port *rocker_port = netdev_priv(dev);
        int err = 0;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_FDB:
                err = rocker_port_fdb_dump(rocker_port,
                                           SWITCHDEV_OBJ_PORT_FDB(obj), cb);
                if (err)
                        break;
                err = rocker_world_port_obj_fdb_dump(rocker_port,
                                                     SWITCHDEV_OBJ_PORT_FDB(obj),
                                                     cb);
                break;
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                err = rocker_port_vlan_dump(rocker_port,
                                            SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
                if (err)
                        break;
                err = rocker_world_port_obj_vlan_dump(rocker_port,
                                                      SWITCHDEV_OBJ_PORT_VLAN(obj),
                                                      cb);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static const struct switchdev_ops rocker_port_switchdev_ops = {
        .switchdev_port_attr_get        = rocker_port_attr_get,
        .switchdev_port_attr_set        = rocker_port_attr_set,
        .switchdev_port_obj_add         = rocker_port_obj_add,
        .switchdev_port_obj_del         = rocker_port_obj_del,
        .switchdev_port_obj_dump        = rocker_port_obj_dump,
};

/********************
 * ethtool interface
 ********************/

static int rocker_port_get_settings(struct net_device *dev,
                                    struct ethtool_cmd *ecmd)
{
        struct rocker_port *rocker_port = netdev_priv(dev);

        return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}

static int rocker_port_set_settings(struct net_device *dev,
                                    struct ethtool_cmd *ecmd)
{
        struct rocker_port *rocker_port = netdev_priv(dev);

        return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}

static void rocker_port_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static struct rocker_port_stats {
        char str[ETH_GSTRING_LEN];
        int type;
} rocker_port_stats[] = {
        { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
        { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
        { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
        { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },

        { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
        { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
        { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
        { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
};

#define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)

static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
                                    u8 *data)
{
        u8 *p = data;
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
                        memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                break;
        }
}

static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
                               struct rocker_desc_info *desc_info,
                               void *priv)
{
        struct rocker_tlv *cmd_stats;

        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
                               ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
                return -EMSGSIZE;

        cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
        if (!cmd_stats)
                return -EMSGSIZE;

        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
                               rocker_port->pport))
                return -EMSGSIZE;

        rocker_tlv_nest_end(desc_info, cmd_stats);

        return 0;
}

static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
                                       const struct rocker_desc_info *desc_info,
                                       void *priv)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
        const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
        const struct rocker_tlv *pattr;
        u32 pport;
        u64 *data = priv;
        int i;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

        if (!attrs[ROCKER_TLV_CMD_INFO])
                return -EIO;

        rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
                                attrs[ROCKER_TLV_CMD_INFO]);

        if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
                return -EIO;

        pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
        if (pport != rocker_port->pport)
                return -EIO;

        for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
                pattr = stats_attrs[rocker_port_stats[i].type];
                if (!pattr)
                        continue;

                data[i] = rocker_tlv_get_u64(pattr);
        }

        return 0;
}

static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
                                             void *priv)
{
        return rocker_cmd_exec(rocker_port, false,
                               rocker_cmd_get_port_stats_prep, NULL,
                               rocker_cmd_get_port_stats_ethtool_proc,
                               priv);
}
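
/* Fetching stats is an ordinary command-ring round trip: the prep
 * callback builds the GET_PORT_STATS request, the proc callback parses
 * the completion, checks that it is for the right pport, and copies
 * one u64 per rocker_port_stats[] entry into the ethtool data array in
 * the same order as the strings above. A missing attribute leaves its
 * slot untouched.
 */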

static void rocker_port_get_stats(struct net_device *dev,
                                  struct ethtool_stats *stats, u64 *data)
{
        struct rocker_port *rocker_port = netdev_priv(dev);

        if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
                int i;

                for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
                        data[i] = 0;
        }
}

static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ROCKER_PORT_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}

static const struct ethtool_ops rocker_port_ethtool_ops = {
        .get_settings           = rocker_port_get_settings,
        .set_settings           = rocker_port_set_settings,
        .get_drvinfo            = rocker_port_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_strings            = rocker_port_get_strings,
        .get_ethtool_stats      = rocker_port_get_stats,
        .get_sset_count         = rocker_port_get_sset_count,
};

/*****************
 * NAPI interface
 *****************/

static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
        return container_of(napi, struct rocker_port, napi_tx);
}

static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
        struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
        const struct rocker *rocker = rocker_port->rocker;
        const struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;

        /* Cleanup tx descriptors */
        while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
                struct sk_buff *skb;

                err = rocker_desc_err(desc_info);
                if (err && net_ratelimit())
                        netdev_err(rocker_port->dev, "tx desc received with err %d\n",
                                   err);
                rocker_tx_desc_frags_unmap(rocker_port, desc_info);

                skb = rocker_desc_cookie_ptr_get(desc_info);
                if (err == 0) {
                        rocker_port->dev->stats.tx_packets++;
                        rocker_port->dev->stats.tx_bytes += skb->len;
                } else {
                        rocker_port->dev->stats.tx_errors++;
                }

                dev_kfree_skb_any(skb);
                credits++;
        }

        if (credits && netif_queue_stopped(rocker_port->dev))
                netif_wake_queue(rocker_port->dev);

        napi_complete(napi);
        rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

        return 0;
}
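
/* Tx completion deliberately ignores the NAPI budget: reclaiming is
 * cheap and the loop is bounded by the ring size. Every reclaimed
 * descriptor is returned to the device as a credit via
 * rocker_dma_ring_credits_set(), and a queue stopped in the xmit path
 * is woken as soon as at least one slot has been freed.
 */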

static int rocker_port_rx_proc(const struct rocker *rocker,
                               const struct rocker_port *rocker_port,
                               struct rocker_desc_info *desc_info)
{
        const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
        struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
        size_t rx_len;
        u16 rx_flags = 0;

        if (!skb)
                return -ENOENT;

        rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
        if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
                return -EINVAL;
        if (attrs[ROCKER_TLV_RX_FLAGS])
                rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

        rocker_dma_rx_ring_skb_unmap(rocker, attrs);

        rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
        skb_put(skb, rx_len);
        skb->protocol = eth_type_trans(skb, rocker_port->dev);

        if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
                skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;

        rocker_port->dev->stats.rx_packets++;
        rocker_port->dev->stats.rx_bytes += skb->len;

        netif_receive_skb(skb);

        return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}

static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
        return container_of(napi, struct rocker_port, napi_rx);
}

static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
        struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
        const struct rocker *rocker = rocker_port->rocker;
        struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;

        /* Process rx descriptors */
        while (credits < budget &&
               (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
                err = rocker_desc_err(desc_info);
                if (err) {
                        if (net_ratelimit())
                                netdev_err(rocker_port->dev, "rx desc received with err %d\n",
                                           err);
                } else {
                        err = rocker_port_rx_proc(rocker, rocker_port,
                                                  desc_info);
                        if (err && net_ratelimit())
                                netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
                                           err);
                }
                if (err)
                        rocker_port->dev->stats.rx_errors++;

                rocker_desc_gen_clear(desc_info);
                rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
                credits++;
        }

        if (credits < budget)
                napi_complete(napi);

        rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

        return credits;
}
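
/* This is the standard NAPI rx contract: consume at most budget
 * descriptors, call napi_complete() (re-arming the interrupt) only
 * when the ring drained below budget, and return the count so the
 * core keeps polling otherwise. Descriptors are recycled in place;
 * rocker_port_rx_proc() hangs a fresh skb off each one before it is
 * handed back to the device as a credit.
 */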

/*****************
 * PCI driver ops
 *****************/

static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
        const struct rocker *rocker = rocker_port->rocker;
        u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
        bool link_up;

        link_up = link_status & (1 << rocker_port->pport);
        if (link_up)
                netif_carrier_on(rocker_port->dev);
        else
                netif_carrier_off(rocker_port->dev);
}

static void rocker_remove_ports(struct rocker *rocker)
{
        struct rocker_port *rocker_port;
        int i;

        for (i = 0; i < rocker->port_count; i++) {
                rocker_port = rocker->ports[i];
                if (!rocker_port)
                        continue;
                rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
                rocker_world_port_fini(rocker_port);
                unregister_netdev(rocker_port->dev);
                rocker_world_port_post_fini(rocker_port);
                free_netdev(rocker_port->dev);
        }
        rocker_world_fini(rocker);
        kfree(rocker->ports);
}

static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
        const struct rocker *rocker = rocker_port->rocker;
        const struct pci_dev *pdev = rocker->pdev;
        int err;

        err = rocker_cmd_get_port_settings_macaddr(rocker_port,
                                                   rocker_port->dev->dev_addr);
        if (err) {
                dev_warn(&pdev->dev, "failed to get mac address, using random\n");
                eth_hw_addr_random(rocker_port->dev);
        }
}

static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
        const struct pci_dev *pdev = rocker->pdev;
        struct rocker_port *rocker_port;
        struct net_device *dev;
        u16 untagged_vid = 0;
        int err;

        dev = alloc_etherdev(sizeof(struct rocker_port));
        if (!dev)
                return -ENOMEM;
        rocker_port = netdev_priv(dev);
        rocker_port->dev = dev;
        rocker_port->rocker = rocker;
        rocker_port->port_number = port_number;
        rocker_port->pport = port_number + 1;
        rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
        rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;

        err = rocker_world_check_init(rocker_port);
        if (err) {
                dev_err(&pdev->dev, "world init failed\n");
                goto err_world_check_init;
        }

        rocker_port_dev_addr_init(rocker_port);
        dev->netdev_ops = &rocker_port_netdev_ops;
        dev->ethtool_ops = &rocker_port_ethtool_ops;
        dev->switchdev_ops = &rocker_port_switchdev_ops;
        netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
                          NAPI_POLL_WEIGHT);
        netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
                       NAPI_POLL_WEIGHT);
        rocker_carrier_init(rocker_port);

        dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

        err = rocker_world_port_pre_init(rocker_port);
        if (err) {
                dev_err(&pdev->dev, "port world pre-init failed\n");
                goto err_world_port_pre_init;
        }
        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "register_netdev failed\n");
                goto err_register_netdev;
        }
        rocker->ports[port_number] = rocker_port;

        err = rocker_world_port_init(rocker_port);
        if (err) {
                dev_err(&pdev->dev, "port world init failed\n");
                goto err_world_port_init;
        }

        switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);

        rocker_port_set_learning(rocker_port,
                                 !!(rocker_port->brport_flags & BR_LEARNING));

        err = rocker_port_ig_tbl(rocker_port, NULL, 0);
        if (err) {
                netdev_err(rocker_port->dev, "install ig port table failed\n");
                goto err_port_ig_tbl;
        }

        rocker_port->internal_vlan_id =
                rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);

        err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
        if (err) {
                netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
                goto err_untagged_vlan;
        }

        return 0;

err_untagged_vlan:
        rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
        rocker_world_port_fini(rocker_port);
err_world_port_init:
        rocker->ports[port_number] = NULL;
        unregister_netdev(dev);
err_register_netdev:
        rocker_world_port_post_fini(rocker_port);
err_world_port_pre_init:
err_world_check_init:
        free_netdev(dev);
        return err;
}
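
/* Note the port_number/pport split: port_number is the zero-based
 * index into rocker->ports[], while pport (port_number + 1) is the
 * 1-based physical port id the hardware uses in commands and in the
 * link-status bitmap. Each port also claims an internal VLAN keyed by
 * its netdev ifindex before the default untagged VLAN (vid 0) is
 * installed.
 */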

static int rocker_probe_ports(struct rocker *rocker)
{
        int i;
        size_t alloc_size;
        int err;

        alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
        rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
        if (!rocker->ports)
                return -ENOMEM;
        for (i = 0; i < rocker->port_count; i++) {
                err = rocker_probe_port(rocker, i);
                if (err)
                        goto remove_ports;
        }
        return 0;

remove_ports:
        rocker_remove_ports(rocker);
        return err;
}

static int rocker_msix_init(struct rocker *rocker)
{
        struct pci_dev *pdev = rocker->pdev;
        int msix_entries;
        int i;
        int err;

        msix_entries = pci_msix_vec_count(pdev);
        if (msix_entries < 0)
                return msix_entries;

        if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
                return -EINVAL;

        rocker->msix_entries = kmalloc_array(msix_entries,
                                             sizeof(struct msix_entry),
                                             GFP_KERNEL);
        if (!rocker->msix_entries)
                return -ENOMEM;

        for (i = 0; i < msix_entries; i++)
                rocker->msix_entries[i].entry = i;

        err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
        if (err < 0)
                goto err_enable_msix;

        return 0;

err_enable_msix:
        kfree(rocker->msix_entries);
        return err;
}

static void rocker_msix_fini(const struct rocker *rocker)
{
        pci_disable_msix(rocker->pdev);
        kfree(rocker->msix_entries);
}
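
/* The device exposes a fixed MSI-X layout: one vector for the command
 * ring, one for the event ring, then a tx and an rx vector per port
 * (see the ROCKER_MSIX_VEC_* helpers), so any count other than exactly
 * ROCKER_MSIX_VEC_COUNT(port_count) is treated as a broken device
 * rather than negotiated down.
 */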

static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct rocker *rocker;
        int err;

        rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
        if (!rocker)
                return -ENOMEM;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_device failed\n");
                goto err_pci_enable_device;
        }

        err = pci_request_regions(pdev, rocker_driver_name);
        if (err) {
                dev_err(&pdev->dev, "pci_request_regions failed\n");
                goto err_pci_request_regions;
        }

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!err) {
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
                        goto err_pci_set_dma_mask;
                }
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
                        goto err_pci_set_dma_mask;
                }
        }

        if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
                dev_err(&pdev->dev, "invalid PCI region size\n");
                err = -EINVAL;
                goto err_pci_resource_len_check;
        }

        rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
                                  pci_resource_len(pdev, 0));
        if (!rocker->hw_addr) {
                dev_err(&pdev->dev, "ioremap failed\n");
                err = -EIO;
                goto err_ioremap;
        }
        pci_set_master(pdev);

        rocker->pdev = pdev;
        pci_set_drvdata(pdev, rocker);

        rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

        err = rocker_msix_init(rocker);
        if (err) {
                dev_err(&pdev->dev, "MSI-X init failed\n");
                goto err_msix_init;
        }

        err = rocker_basic_hw_test(rocker);
        if (err) {
                dev_err(&pdev->dev, "basic hw test failed\n");
                goto err_basic_hw_test;
        }

        rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

        err = rocker_dma_rings_init(rocker);
        if (err)
                goto err_dma_rings_init;

        err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
                          rocker_cmd_irq_handler, 0,
                          rocker_driver_name, rocker);
        if (err) {
                dev_err(&pdev->dev, "cannot assign cmd irq\n");
                goto err_request_cmd_irq;
        }

        err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
                          rocker_event_irq_handler, 0,
                          rocker_driver_name, rocker);
        if (err) {
                dev_err(&pdev->dev, "cannot assign event irq\n");
                goto err_request_event_irq;
        }

        rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

        err = rocker_init_tbls(rocker);
        if (err) {
                dev_err(&pdev->dev, "cannot init rocker tables\n");
                goto err_init_tbls;
        }

        setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
                    (unsigned long) rocker);
        mod_timer(&rocker->fdb_cleanup_timer, jiffies);

        err = rocker_probe_ports(rocker);
        if (err) {
                dev_err(&pdev->dev, "failed to probe ports\n");
                goto err_probe_ports;
        }

        dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
                 (int)sizeof(rocker->hw.id), &rocker->hw.id);

        return 0;

err_probe_ports:
        del_timer_sync(&rocker->fdb_cleanup_timer);
        rocker_free_tbls(rocker);
err_init_tbls:
        free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
        free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
        rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
        rocker_msix_fini(rocker);
err_msix_init:
        iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
        pci_release_regions(pdev);
err_pci_request_regions:
        pci_disable_device(pdev);
err_pci_enable_device:
        kfree(rocker);
        return err;
}
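
/* Probe prefers a 64-bit DMA mask and falls back to 32-bit, resets
 * the switch only after rocker_basic_hw_test() has proven that
 * register access and DMA work, and reads the port count straight
 * from the PORT_PHYS_COUNT register before creating one netdev per
 * port. The error labels mirror each init step in reverse.
 */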

static void rocker_remove(struct pci_dev *pdev)
{
        struct rocker *rocker = pci_get_drvdata(pdev);

        del_timer_sync(&rocker->fdb_cleanup_timer);
        rocker_free_tbls(rocker);
        rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
        rocker_remove_ports(rocker);
        free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
        free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
        rocker_dma_rings_fini(rocker);
        rocker_msix_fini(rocker);
        iounmap(rocker->hw_addr);
        pci_release_regions(rocker->pdev);
        pci_disable_device(rocker->pdev);
        kfree(rocker);
}

static struct pci_driver rocker_pci_driver = {
        .name           = rocker_driver_name,
        .id_table       = rocker_pci_id_table,
        .probe          = rocker_probe,
        .remove         = rocker_remove,
};

/************************************
 * Net device notifier event handler
 ************************************/

static bool rocker_port_dev_check(const struct net_device *dev)
{
        return dev->netdev_ops == &rocker_port_netdev_ops;
}

static int rocker_port_bridge_join(struct rocker_port *rocker_port,
                                   struct net_device *bridge)
{
        u16 untagged_vid = 0;
        int err;

        /* Port is joining bridge, so the internal VLAN for the
         * port is going to change to the bridge internal VLAN.
         * Let's remove untagged VLAN (vid=0) from port and
         * re-add once internal VLAN has changed.
         */

        err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
        if (err)
                return err;

        rocker_port_internal_vlan_id_put(rocker_port,
                                         rocker_port->dev->ifindex);
        rocker_port->internal_vlan_id =
                rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

        rocker_port->bridge_dev = bridge;
        switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);

        return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}
5541
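/* Leaving the bridge reverses the join: the port goes back to its own
 * internal VLAN and, if the netdev is still up, forwarding is
 * re-enabled for standalone operation.
 */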
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	u16 untagged_vid = 0;
	int err;

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);

	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
				    false);
	rocker_port->bridge_dev = NULL;

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}

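/* For an OVS master there is no internal VLAN to migrate; cycling
 * forwarding off and back on refreshes the port's forwarding state
 * under the new (or cleared) master.
 */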
static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
				   struct net_device *master)
{
	int err;

	rocker_port->bridge_dev = master;

	err = rocker_port_fwd_disable(rocker_port, NULL, 0);
	if (err)
		return err;
	err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}

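/* Dispatch on the kind of master the port was linked under: a Linux
 * bridge gets the internal-VLAN join, an OVS master gets a forwarding
 * refresh; any other master type is ignored.
 */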
static int rocker_port_master_linked(struct rocker_port *rocker_port,
				     struct net_device *master)
{
	int err = 0;

	if (netif_is_bridge_master(master))
		err = rocker_port_bridge_join(rocker_port, master);
	else if (netif_is_ovs_master(master))
		err = rocker_port_ovs_changed(rocker_port, master);
	return err;
}

static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
{
	int err = 0;

	if (rocker_port_is_bridged(rocker_port))
		err = rocker_port_bridge_leave(rocker_port);
	else if (rocker_port_is_ovsed(rocker_port))
		err = rocker_port_ovs_changed(rocker_port, NULL);
	return err;
}

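/* The netdevice notifier only acts on NETDEV_CHANGEUPPER events for
 * rocker ports. Both the world ops and the legacy OF-DPA path are told
 * about the change; failures are logged but never veto the event, so
 * the handler always returns NOTIFY_DONE.
 */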
static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct rocker_port *rocker_port;
	int err;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;
		if (!info->master)
			goto out;
		rocker_port = netdev_priv(dev);
		if (info->linking) {
			err = rocker_world_port_master_linked(rocker_port,
							      info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
			err = rocker_port_master_linked(rocker_port,
							info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
		} else {
			err = rocker_world_port_master_unlinked(rocker_port,
								info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
			err = rocker_port_master_unlinked(rocker_port);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
		}
		break;
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};

/************************************
 * Net event notifier event handler
 ************************************/

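/* An ARP entry that is NUD_VALID is pushed down as a neighbour add,
 * otherwise as a remove. ROCKER_OP_FLAG_NOWAIT keeps the hardware
 * update from blocking in the notifier context.
 */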
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
		    ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
}

static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct rocker_port *rocker_port;
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		rocker_port = netdev_priv(dev);
		err = rocker_world_port_neigh_update(rocker_port, n);
		if (err)
			netdev_warn(dev, "failed to handle neigh update (err %d)\n",
				    err);
		err = rocker_neigh_update(dev, n);
		if (err)
			netdev_warn(dev,
				    "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};

/***********************
 * Module init and exit
 ***********************/

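/* The notifiers are registered before the PCI driver so that no netdev
 * or netevent for ports created during probe is missed; the error path
 * unregisters them again if driver registration fails.
 */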
static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}

static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}

module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);