/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker_hw.h"
#include "rocker.h"
#include "rocker_tlv.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};

struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};

struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};

static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries. Higher
 * priority match takes precedence over lower priority match.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}
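
/* Illustrative note: untagged traffic (vid 0) is mapped to the port's
 * internal VLAN. For example, rocker_port_vid_to_vlan(port, 0, &pop_vlan)
 * returns port->internal_vlan_id with pop_vlan set true, so the internal
 * tag is stripped again on egress, while a real vid such as 100 simply
 * becomes htons(100) with pop_vlan false.
 */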

static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_ovs_master(rocker_port->bridge_dev);
}

#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)
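
/* Illustrative note: callers combine these flags per operation. For
 * example, the MAC/VLAN "seen" event handler below passes
 * ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED so the resulting FDB
 * update neither sleeps in interrupt-driven context nor forgets that
 * the address was learned rather than statically configured.
 */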

static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
				size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction. If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory. The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	return elem ? elem + 1 : NULL;
}

static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
			    size_t size)
{
	return __rocker_mem_alloc(trans, flags, size);
}

static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
			    size_t n, size_t size)
{
	return __rocker_mem_alloc(trans, flags, n * size);
}

static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase. The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}
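
/* Illustrative sketch (not part of the driver flow): a typical
 * prepare/commit pair built on the helpers above. The same code runs in
 * both phases with the same trans pointer; only the phase differs:
 *
 *	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
 *	if (!entry)
 *		return -ENOMEM;
 *	err = some_hw_op(rocker_port, trans, entry);	// hypothetical op
 *	if (err)
 *		rocker_kfree(trans, entry);		// no-op in prepare
 *
 * In the prepare phase the allocation is parked on the transaction; in
 * the commit phase the very same memory is dequeued and reused, so the
 * commit cannot fail on allocation.
 */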

struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(void)
{
	struct rocker_wait *wait;

	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait)
		return NULL;
	return wait;
}

static void rocker_wait_destroy(struct rocker_wait *wait)
{
	kfree(wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	if (!wait->done)
		return false;
	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}
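
/* Illustrative note: the wait objects tie command submission to the
 * command-ring interrupt. A synchronous caller does, in sketch form:
 *
 *	rocker_wait_init(wait);
 *	... post descriptor to the cmd ring ...
 *	if (!rocker_wait_event_timeout(wait, HZ / 10))
 *		return -EIO;
 *
 * while rocker_cmd_irq_handler() calls rocker_wait_wake_up(wait) when
 * the descriptor completes (see rocker_cmd_exec() below).
 */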

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
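
/* Illustrative note: the accessors above splice the register name onto
 * the ROCKER_ prefix at compile time, so
 *
 *	rocker_write32(rocker, TEST_REG, val);
 *
 * expands to writel(val, rocker->hw_addr + ROCKER_TEST_REG). Register
 * offsets come from rocker_hw.h.
 */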

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}

static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
{
	const struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}

/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}
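
/* Illustrative note: ring positions advance modulo the ring size, so
 * with a ring of size 4, __pos_inc() yields 0 -> 1 -> 2 -> 3 -> 0.
 * Head and tail chase each other around the ring; "head + 1 == tail"
 * (modulo size) is treated as full in rocker_desc_head_get() below.
 */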

static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *
rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}
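
/* Illustrative note: descriptor ownership is tracked by the GEN bit in
 * comp_err. The device sets GEN when it completes a descriptor, so
 * rocker_desc_tail_get() consumes entries only once GEN is set, and
 * consumers call rocker_desc_gen_clear() before handing the descriptor
 * back to the device.
 */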

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
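
/* Illustrative note: ring sizes are rounded up to a power of two and
 * clamped to [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX]; for example a
 * requested size of 100 would become 128, assuming that lies within
 * the limits. rocker_dma_ring_create() below BUG_ONs if it is handed a
 * size this helper would have changed.
 */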

static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}

static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}

static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait;

	wait = rocker_wait_create();
	if (!wait)
		return -ENOMEM;
	rocker_desc_cookie_ptr_set(desc_info, wait);
	return 0;
}

static void
rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info);

	rocker_wait_destroy(wait);
}

static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker)
{
	const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
	int i;
	int err;

	for (i = 0; i < cmd_ring->size; i++) {
		err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
	return err;
}

static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker)
{
	const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
	int i;

	for (i = 0; i < cmd_ring->size; i++)
		rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_cmd_ring_waits_alloc(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring waits\n");
		goto err_dma_cmd_ring_waits_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_waits_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_cmd_ring_waits_free(rocker);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
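
/* Illustrative note: the receive buffer must hold a full frame for the
 * current MTU, e.g. with a 1500-byte MTU this is 1500 + 14 (Ethernet
 * header) + 4 (FCS) + 4 (one VLAN tag) = 1522 bytes.
 */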

static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}

static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
					 const struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}

static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}

static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(const struct rocker_port *rocker_port,
				   bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}

/********************************
 * Interrupt handler and helpers
 ********************************/

static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}

static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}

static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);
static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
					      __be16 vlan_id);

static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;
	int err;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	err = rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
	if (err)
		return err;

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}

static int rocker_event_process(const struct rocker *rocker,
				const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	const struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/

typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv);

typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
				    const struct rocker_desc_info *desc_info,
				    void *priv);

static int rocker_cmd_exec(struct rocker_port *rocker_port, int flags,
			   rocker_cmd_prep_cb_t prepare, void *prepare_priv,
			   rocker_cmd_proc_cb_t process, void *process_priv)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
	unsigned long lock_flags;
	int err;

	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		return -EAGAIN;
	}

	wait = rocker_desc_cookie_ptr_get(desc_info);
	rocker_wait_init(wait);
	wait->nowait = nowait;

	err = prepare(rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		return err;
	}

	rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	if (nowait)
		return 0;

	if (!rocker_wait_event_timeout(wait, HZ / 10))
		return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		return err;

	if (process)
		err = process(rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
	return err;
}
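
/* Illustrative sketch of the prepare/process pairing: fetching a port's
 * MAC address (as a wrapper like rocker_port_get_mac_addr() might do;
 * the wrapper name is hypothetical) would look like
 *
 *	err = rocker_cmd_exec(rocker_port, 0,
 *			      rocker_cmd_get_port_settings_prep, NULL,
 *			      rocker_cmd_get_port_settings_macaddr_proc,
 *			      macaddr);
 *
 * The prepare callback fills the command descriptor's TLVs and the
 * process callback parses the completed descriptor; both run against
 * the same cmd ring slot.
 */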

static int
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_TP;
	ecmd->phy_address = 0xff;
	ecmd->port = PORT_TP;
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}

static int
rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	if (!attr)
		return -EIO;

	if (rocker_tlv_len(attr) != ETH_ALEN)
		return -EINVAL;

	ether_addr_copy(macaddr, rocker_tlv_data(attr));
	return 0;
}

static int
rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	u8 *p_mode = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
	if (!attr)
		return -EIO;

	*p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
	return 0;
}

struct port_name {
	char *buf;
	size_t len;
};

static int
rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
					    const struct rocker_desc_info *desc_info,
					    void *priv)
{
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct port_name *name = priv;
	const struct rocker_tlv *attr;
	size_t i, j, len;
	const char *str;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
	if (!attr)
		return -EIO;
1525
 1526 len = min_t(size_t, rocker_tlv_len(attr), name->len - 1);
1527 str = rocker_tlv_data(attr);
1528
 1529 /* keep only alphanumeric characters; one byte was reserved above
 1529 * for the trailing NUL
 1529 */
1530 for (i = j = 0; i < len; ++i) {
1531 if (isalnum(str[i])) {
1532 name->buf[j] = str[i];
1533 j++;
1534 }
1535 }
1536
1537 if (j == 0)
1538 return -EIO;
1539
1540 name->buf[j] = '\0';
1541
1542 return 0;
1543}
1544
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001545static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001546rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001547 struct rocker_desc_info *desc_info,
1548 void *priv)
1549{
1550 struct ethtool_cmd *ecmd = priv;
1551 struct rocker_tlv *cmd_info;
1552
1553 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1554 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1555 return -EMSGSIZE;
1556 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1557 if (!cmd_info)
1558 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001559 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1560 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001561 return -EMSGSIZE;
1562 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1563 ethtool_cmd_speed(ecmd)))
1564 return -EMSGSIZE;
1565 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1566 ecmd->duplex))
1567 return -EMSGSIZE;
1568 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1569 ecmd->autoneg))
1570 return -EMSGSIZE;
1571 rocker_tlv_nest_end(desc_info, cmd_info);
1572 return 0;
1573}
1574
1575static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001576rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001577 struct rocker_desc_info *desc_info,
1578 void *priv)
1579{
Simon Hormane5054642015-05-25 14:28:36 +09001580 const unsigned char *macaddr = priv;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001581 struct rocker_tlv *cmd_info;
1582
1583 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1584 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1585 return -EMSGSIZE;
1586 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1587 if (!cmd_info)
1588 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001589 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1590 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001591 return -EMSGSIZE;
1592 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1593 ETH_ALEN, macaddr))
1594 return -EMSGSIZE;
1595 rocker_tlv_nest_end(desc_info, cmd_info);
1596 return 0;
1597}
1598
Scott Feldman5111f802014-11-28 14:34:30 +01001599static int
Scott Feldman77a58c72015-07-08 16:06:47 -07001600rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1601 struct rocker_desc_info *desc_info,
1602 void *priv)
1603{
1604 int mtu = *(int *)priv;
1605 struct rocker_tlv *cmd_info;
1606
1607 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1608 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1609 return -EMSGSIZE;
1610 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1611 if (!cmd_info)
1612 return -EMSGSIZE;
1613 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1614 rocker_port->pport))
1615 return -EMSGSIZE;
1616 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1617 mtu))
1618 return -EMSGSIZE;
1619 rocker_tlv_nest_end(desc_info, cmd_info);
1620 return 0;
1621}
1622
1623static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001624rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
Scott Feldman5111f802014-11-28 14:34:30 +01001625 struct rocker_desc_info *desc_info,
1626 void *priv)
1627{
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001628 bool learning = *(bool *)priv;
Scott Feldman5111f802014-11-28 14:34:30 +01001629 struct rocker_tlv *cmd_info;
1630
1631 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1632 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1633 return -EMSGSIZE;
1634 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1635 if (!cmd_info)
1636 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001637 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1638 rocker_port->pport))
Scott Feldman5111f802014-11-28 14:34:30 +01001639 return -EMSGSIZE;
1640 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001641 learning))
Scott Feldman5111f802014-11-28 14:34:30 +01001642 return -EMSGSIZE;
1643 rocker_tlv_nest_end(desc_info, cmd_info);
1644 return 0;
1645}
1646
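/* The wrappers below pair a "prep" callback, which builds the request
 * TLVs, with an optional "proc" callback, which parses the response;
 * rocker_cmd_exec() runs the round trip and passes each callback its
 * priv cookie. Getters supply a proc callback plus a result buffer,
 * setters only a prep callback. Shape of a call (illustrative only;
 * some_prep/some_proc are placeholder names, not real symbols):
 *
 *	err = rocker_cmd_exec(rocker_port, 0,
 *			      some_prep, prep_priv,
 *			      some_proc, proc_priv);
 */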
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001647static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1648 struct ethtool_cmd *ecmd)
1649{
Jiri Pirkoae3907e2016-02-16 15:14:48 +01001650 return rocker_cmd_exec(rocker_port, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001651 rocker_cmd_get_port_settings_prep, NULL,
1652 rocker_cmd_get_port_settings_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001653 ecmd);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001654}
1655
1656static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1657 unsigned char *macaddr)
1658{
Jiri Pirkoae3907e2016-02-16 15:14:48 +01001659 return rocker_cmd_exec(rocker_port, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001660 rocker_cmd_get_port_settings_prep, NULL,
1661 rocker_cmd_get_port_settings_macaddr_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001662 macaddr);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001663}
1664
Jiri Pirkoe1ba3de2016-02-16 15:14:43 +01001665static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
1666 u8 *p_mode)
1667{
Jiri Pirkoae3907e2016-02-16 15:14:48 +01001668 return rocker_cmd_exec(rocker_port, 0,
Jiri Pirkoe1ba3de2016-02-16 15:14:43 +01001669 rocker_cmd_get_port_settings_prep, NULL,
1670 rocker_cmd_get_port_settings_mode_proc, p_mode);
1671}
1672
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001673static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1674 struct ethtool_cmd *ecmd)
1675{
Jiri Pirkoae3907e2016-02-16 15:14:48 +01001676 return rocker_cmd_exec(rocker_port, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001677 rocker_cmd_set_port_settings_ethtool_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001678 ecmd, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001679}
1680
1681static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1682 unsigned char *macaddr)
1683{
Jiri Pirkoae3907e2016-02-16 15:14:48 +01001684 return rocker_cmd_exec(rocker_port, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001685 rocker_cmd_set_port_settings_macaddr_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001686 macaddr, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001687}
1688
Scott Feldman77a58c72015-07-08 16:06:47 -07001689static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1690 int mtu)
1691{
Jiri Pirkoae3907e2016-02-16 15:14:48 +01001692 return rocker_cmd_exec(rocker_port, 0,
Scott Feldman77a58c72015-07-08 16:06:47 -07001693 rocker_cmd_set_port_settings_mtu_prep,
1694 &mtu, NULL, NULL);
1695}
1696
Scott Feldmanc4f20322015-05-10 09:47:50 -07001697static int rocker_port_set_learning(struct rocker_port *rocker_port,
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001698 bool learning)
Scott Feldman5111f802014-11-28 14:34:30 +01001699{
Jiri Pirkoae3907e2016-02-16 15:14:48 +01001700 return rocker_cmd_exec(rocker_port, 0,
Scott Feldman5111f802014-11-28 14:34:30 +01001701 rocker_cmd_set_port_learning_prep,
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001702 &learning, NULL, NULL);
Scott Feldman5111f802014-11-28 14:34:30 +01001703}
1704
Jiri Pirkoe4201142016-02-16 15:14:45 +01001705/**********************
1706 * Worlds manipulation
1707 **********************/
1708
1709static struct rocker_world_ops *rocker_world_ops[] = {
1710 &rocker_ofdpa_ops,
1711};
1712
1713#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)
1714
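/* Adding a world means adding its ops to rocker_world_ops[] above. A
 * minimal sketch, assuming a hypothetical "foo" world and showing only
 * ops actually dispatched by the wrappers below; the mode value would
 * need a matching ROCKER_PORT_MODE_* definition in the hardware header:
 *
 *	static struct rocker_world_ops rocker_foo_ops = {
 *		.mode		= ROCKER_PORT_MODE_FOO,
 *		.priv_size	= sizeof(struct foo_world),
 *		.port_priv_size	= sizeof(struct foo_port),
 *		.init		= foo_init,
 *		.fini		= foo_fini,
 *		.port_open	= foo_port_open,
 *		.port_stop	= foo_port_stop,
 *	};
 */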
1715static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
1716{
1717 int i;
1718
1719 for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
1720 if (rocker_world_ops[i]->mode == mode)
1721 return rocker_world_ops[i];
1722 return NULL;
1723}
1724
1725static int rocker_world_init(struct rocker *rocker, u8 mode)
1726{
1727 struct rocker_world_ops *wops;
1728 int err;
1729
1730 wops = rocker_world_ops_find(mode);
1731 if (!wops) {
1732 dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n",
1733 mode);
1734 return -EINVAL;
1735 }
1736 rocker->wops = wops;
1737 rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
1738 if (!rocker->wpriv)
1739 return -ENOMEM;
1740 if (!wops->init)
1741 return 0;
1742 err = wops->init(rocker);
1743 if (err)
1744 kfree(rocker->wpriv);
1745 return err;
1746}
1747
1748static void rocker_world_fini(struct rocker *rocker)
1749{
1750 struct rocker_world_ops *wops = rocker->wops;
1751
1752 if (!wops || !wops->fini)
1753 return;
1754 wops->fini(rocker);
1755 kfree(rocker->wpriv);
1756}
1757
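/* All ports of one device must report the same mode: the first port
 * probed selects the world, and rocker_world_check_init() rejects any
 * port that reports a different one. The rocker_world_* wrappers below
 * treat a missing op as a successful no-op, so a world implements only
 * the hooks it needs.
 */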
1758static int rocker_world_check_init(struct rocker_port *rocker_port)
1759{
1760 struct rocker *rocker = rocker_port->rocker;
1761 u8 mode;
1762 int err;
1763
1764 err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
1765 if (err) {
1766 dev_err(&rocker->pdev->dev, "failed to get port mode\n");
1767 return err;
1768 }
1769 if (rocker->wops) {
1770 if (rocker->wops->mode != mode) {
1771 dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
 1772 return -EINVAL;
1773 }
1774 return 0;
1775 }
1776 return rocker_world_init(rocker, mode);
1777}
1778
1779static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
1780{
1781 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1782 int err;
1783
1784 rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
1785 if (!rocker_port->wpriv)
1786 return -ENOMEM;
1787 if (!wops->port_pre_init)
1788 return 0;
1789 err = wops->port_pre_init(rocker_port);
1790 if (err)
1791 kfree(rocker_port->wpriv);
 1792 return err;
1793}
1794
1795static int rocker_world_port_init(struct rocker_port *rocker_port)
1796{
1797 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1798
1799 if (!wops->port_init)
1800 return 0;
1801 return wops->port_init(rocker_port);
1802}
1803
1804static void rocker_world_port_fini(struct rocker_port *rocker_port)
1805{
1806 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1807
1808 if (!wops->port_fini)
1809 return;
1810 wops->port_fini(rocker_port);
1811}
1812
1813static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
1814{
1815 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1816
1817 if (!wops->port_post_fini)
1818 return;
1819 wops->port_post_fini(rocker_port);
1820 kfree(rocker_port->wpriv);
1821}
1822
1823static int rocker_world_port_open(struct rocker_port *rocker_port)
1824{
1825 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1826
1827 if (!wops->port_open)
1828 return 0;
1829 return wops->port_open(rocker_port);
1830}
1831
1832static void rocker_world_port_stop(struct rocker_port *rocker_port)
1833{
1834 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1835
1836 if (!wops->port_stop)
1837 return;
1838 wops->port_stop(rocker_port);
1839}
1840
1841static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
1842 u8 state,
1843 struct switchdev_trans *trans)
1844{
1845 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1846
1847 if (!wops->port_attr_stp_state_set)
1848 return 0;
1849 return wops->port_attr_stp_state_set(rocker_port, state, trans);
1850}
1851
1852static int
1853rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
1854 unsigned long brport_flags,
1855 struct switchdev_trans *trans)
1856{
1857 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1858
1859 if (!wops->port_attr_bridge_flags_set)
1860 return 0;
1861 return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
1862 trans);
1863}
1864
1865static int
1866rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
1867 unsigned long *p_brport_flags)
1868{
1869 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1870
1871 if (!wops->port_attr_bridge_flags_get)
1872 return 0;
1873 return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);
1874}
1875
1876static int
1877rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
1878 u32 ageing_time,
1879 struct switchdev_trans *trans)
1881{
1882 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1883
1884 if (!wops->port_attr_bridge_ageing_time_set)
1885 return 0;
1886 return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
1887 trans);
1888}
1889
1890static int
1891rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
1892 const struct switchdev_obj_port_vlan *vlan,
1893 struct switchdev_trans *trans)
1894{
1895 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1896
1897 if (!wops->port_obj_vlan_add)
1898 return 0;
1899 return wops->port_obj_vlan_add(rocker_port, vlan, trans);
1900}
1901
1902static int
1903rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
1904 const struct switchdev_obj_port_vlan *vlan)
1905{
1906 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1907
1908 if (!wops->port_obj_vlan_del)
1909 return 0;
1910 return wops->port_obj_vlan_del(rocker_port, vlan);
1911}
1912
1913static int
1914rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
1915 struct switchdev_obj_port_vlan *vlan,
1916 switchdev_obj_dump_cb_t *cb)
1917{
1918 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1919
1920 if (!wops->port_obj_vlan_dump)
1921 return 0;
1922 return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
1923}
1924
1925static int
1926rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
1927 const struct switchdev_obj_ipv4_fib *fib4,
1928 struct switchdev_trans *trans)
1929{
1930 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1931
1932 if (!wops->port_obj_fib4_add)
1933 return 0;
1934 return wops->port_obj_fib4_add(rocker_port, fib4, trans);
1935}
1936
1937static int
1938rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
1939 const struct switchdev_obj_ipv4_fib *fib4)
1940{
1941 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1942
1943 if (!wops->port_obj_fib4_del)
1944 return 0;
1945 return wops->port_obj_fib4_del(rocker_port, fib4);
1946}
1947
1948static int
1949rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
1950 const struct switchdev_obj_port_fdb *fdb,
1951 struct switchdev_trans *trans)
1952{
1953 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1954
1955 if (!wops->port_obj_fdb_add)
1956 return 0;
1957 return wops->port_obj_fdb_add(rocker_port, fdb, trans);
1958}
1959
1960static int
1961rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
1962 const struct switchdev_obj_port_fdb *fdb)
1963{
1964 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1965
1966 if (!wops->port_obj_fdb_del)
1967 return 0;
1968 return wops->port_obj_fdb_del(rocker_port, fdb);
1969}
1970
1971static int
1972rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
1973 struct switchdev_obj_port_fdb *fdb,
1974 switchdev_obj_dump_cb_t *cb)
1975{
1976 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1977
1978 if (!wops->port_obj_fdb_dump)
1979 return 0;
1980 return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
1981}
1982
1983static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
1984 struct net_device *master)
1985{
1986 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1987
1988 if (!wops->port_master_linked)
1989 return 0;
1990 return wops->port_master_linked(rocker_port, master);
1991}
1992
1993static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
1994 struct net_device *master)
1995{
1996 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1997
1998 if (!wops->port_master_unlinked)
1999 return 0;
2000 return wops->port_master_unlinked(rocker_port, master);
2001}
2002
2003static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
2004 struct neighbour *n)
2005{
2006 struct rocker_world_ops *wops = rocker_port->rocker->wops;
2007
2008 if (!wops->port_neigh_update)
2009 return 0;
2010 return wops->port_neigh_update(rocker_port, n);
2011}
2012
2013static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
2014 struct neighbour *n)
2015{
2016 struct rocker_world_ops *wops = rocker_port->rocker->wops;
2017
2018 if (!wops->port_neigh_destroy)
2019 return 0;
2020 return wops->port_neigh_destroy(rocker_port, n);
2021}
2022
2023static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2024 const unsigned char *addr,
2025 __be16 vlan_id)
2026{
2027 struct rocker_world_ops *wops = rocker_port->rocker->wops;
2028
2029 if (!wops->port_ev_mac_vlan_seen)
2030 return 0;
2031 return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
2032}
2033
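/* The flow-table builders below all emit the same envelope: a u16
 * ROCKER_TLV_CMD_TYPE followed by a nested ROCKER_TLV_CMD_INFO carrying
 * the OF-DPA table id, priority, hardtime and cookie plus the per-table
 * key attributes. Rough TLV layout (illustrative, not wire-exact):
 *
 *	[CMD_TYPE = OF_DPA_FLOW_ADD or OF_DPA_FLOW_MOD]
 *	[CMD_INFO]
 *		[OF_DPA_TABLE_ID] [OF_DPA_PRIORITY]
 *		[OF_DPA_HARDTIME] [OF_DPA_COOKIE]
 *		[per-table key TLVs, e.g. OF_DPA_IN_PPORT for ig_port]
 *
 * Deletes (OF_DPA_FLOW_DEL) carry only the cookie.
 */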
Simon Hormane5054642015-05-25 14:28:36 +09002034static int
2035rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
2036 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002037{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002038 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2039 entry->key.ig_port.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002040 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002041 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2042 entry->key.ig_port.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002043 return -EMSGSIZE;
2044 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2045 entry->key.ig_port.goto_tbl))
2046 return -EMSGSIZE;
2047
2048 return 0;
2049}
2050
Simon Hormane5054642015-05-25 14:28:36 +09002051static int
2052rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
2053 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002054{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002055 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2056 entry->key.vlan.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002057 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002058 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2059 entry->key.vlan.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002060 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002061 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2062 entry->key.vlan.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002063 return -EMSGSIZE;
2064 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2065 entry->key.vlan.goto_tbl))
2066 return -EMSGSIZE;
2067 if (entry->key.vlan.untagged &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002068 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
2069 entry->key.vlan.new_vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002070 return -EMSGSIZE;
2071
2072 return 0;
2073}
2074
Simon Hormane5054642015-05-25 14:28:36 +09002075static int
2076rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
2077 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002078{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002079 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2080 entry->key.term_mac.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002081 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002082 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2083 entry->key.term_mac.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002084 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002085 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2086 entry->key.term_mac.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002087 return -EMSGSIZE;
2088 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2089 ETH_ALEN, entry->key.term_mac.eth_dst))
2090 return -EMSGSIZE;
2091 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2092 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
2093 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002094 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2095 entry->key.term_mac.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002096 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002097 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2098 entry->key.term_mac.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002099 return -EMSGSIZE;
2100 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2101 entry->key.term_mac.goto_tbl))
2102 return -EMSGSIZE;
2103 if (entry->key.term_mac.copy_to_cpu &&
2104 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2105 entry->key.term_mac.copy_to_cpu))
2106 return -EMSGSIZE;
2107
2108 return 0;
2109}
2110
2111static int
2112rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002113 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002114{
Jiri Pirko9b03c712014-12-03 14:14:53 +01002115 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2116 entry->key.ucast_routing.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002117 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002118 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2119 entry->key.ucast_routing.dst4))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002120 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002121 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2122 entry->key.ucast_routing.dst4_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002123 return -EMSGSIZE;
2124 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2125 entry->key.ucast_routing.goto_tbl))
2126 return -EMSGSIZE;
2127 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2128 entry->key.ucast_routing.group_id))
2129 return -EMSGSIZE;
2130
2131 return 0;
2132}
2133
Simon Hormane5054642015-05-25 14:28:36 +09002134static int
2135rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2136 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002137{
2138 if (entry->key.bridge.has_eth_dst &&
2139 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2140 ETH_ALEN, entry->key.bridge.eth_dst))
2141 return -EMSGSIZE;
2142 if (entry->key.bridge.has_eth_dst_mask &&
2143 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2144 ETH_ALEN, entry->key.bridge.eth_dst_mask))
2145 return -EMSGSIZE;
2146 if (entry->key.bridge.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002147 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2148 entry->key.bridge.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002149 return -EMSGSIZE;
2150 if (entry->key.bridge.tunnel_id &&
2151 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2152 entry->key.bridge.tunnel_id))
2153 return -EMSGSIZE;
2154 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2155 entry->key.bridge.goto_tbl))
2156 return -EMSGSIZE;
2157 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2158 entry->key.bridge.group_id))
2159 return -EMSGSIZE;
2160 if (entry->key.bridge.copy_to_cpu &&
2161 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2162 entry->key.bridge.copy_to_cpu))
2163 return -EMSGSIZE;
2164
2165 return 0;
2166}
2167
Simon Hormane5054642015-05-25 14:28:36 +09002168static int
2169rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2170 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002171{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002172 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2173 entry->key.acl.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002174 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002175 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2176 entry->key.acl.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002177 return -EMSGSIZE;
2178 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2179 ETH_ALEN, entry->key.acl.eth_src))
2180 return -EMSGSIZE;
2181 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2182 ETH_ALEN, entry->key.acl.eth_src_mask))
2183 return -EMSGSIZE;
2184 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2185 ETH_ALEN, entry->key.acl.eth_dst))
2186 return -EMSGSIZE;
2187 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2188 ETH_ALEN, entry->key.acl.eth_dst_mask))
2189 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002190 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2191 entry->key.acl.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002192 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002193 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2194 entry->key.acl.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002195 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002196 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2197 entry->key.acl.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002198 return -EMSGSIZE;
2199
2200 switch (ntohs(entry->key.acl.eth_type)) {
2201 case ETH_P_IP:
2202 case ETH_P_IPV6:
2203 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2204 entry->key.acl.ip_proto))
2205 return -EMSGSIZE;
2206 if (rocker_tlv_put_u8(desc_info,
2207 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2208 entry->key.acl.ip_proto_mask))
2209 return -EMSGSIZE;
2210 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2211 entry->key.acl.ip_tos & 0x3f))
2212 return -EMSGSIZE;
2213 if (rocker_tlv_put_u8(desc_info,
2214 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2215 entry->key.acl.ip_tos_mask & 0x3f))
2216 return -EMSGSIZE;
2217 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2218 (entry->key.acl.ip_tos & 0xc0) >> 6))
2219 return -EMSGSIZE;
2220 if (rocker_tlv_put_u8(desc_info,
2221 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2222 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
2223 return -EMSGSIZE;
2224 break;
2225 }
2226
2227 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2228 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2229 entry->key.acl.group_id))
2230 return -EMSGSIZE;
2231
2232 return 0;
2233}
2234
Simon Horman534ba6a2015-06-01 13:25:04 +09002235static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002236 struct rocker_desc_info *desc_info,
2237 void *priv)
2238{
Simon Hormane5054642015-05-25 14:28:36 +09002239 const struct rocker_flow_tbl_entry *entry = priv;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002240 struct rocker_tlv *cmd_info;
2241 int err = 0;
2242
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002243 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002244 return -EMSGSIZE;
2245 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2246 if (!cmd_info)
2247 return -EMSGSIZE;
2248 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2249 entry->key.tbl_id))
2250 return -EMSGSIZE;
2251 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2252 entry->key.priority))
2253 return -EMSGSIZE;
2254 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2255 return -EMSGSIZE;
2256 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2257 entry->cookie))
2258 return -EMSGSIZE;
2259
2260 switch (entry->key.tbl_id) {
2261 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2262 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2263 break;
2264 case ROCKER_OF_DPA_TABLE_ID_VLAN:
2265 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2266 break;
2267 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2268 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2269 break;
2270 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2271 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2272 break;
2273 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2274 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2275 break;
2276 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2277 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2278 break;
2279 default:
2280 err = -ENOTSUPP;
2281 break;
2282 }
2283
2284 if (err)
2285 return err;
2286
2287 rocker_tlv_nest_end(desc_info, cmd_info);
2288
2289 return 0;
2290}
2291
Simon Horman534ba6a2015-06-01 13:25:04 +09002292static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002293 struct rocker_desc_info *desc_info,
2294 void *priv)
2295{
2296 const struct rocker_flow_tbl_entry *entry = priv;
2297 struct rocker_tlv *cmd_info;
2298
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002299 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002300 return -EMSGSIZE;
2301 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2302 if (!cmd_info)
2303 return -EMSGSIZE;
2304 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2305 entry->cookie))
2306 return -EMSGSIZE;
2307 rocker_tlv_nest_end(desc_info, cmd_info);
2308
2309 return 0;
2310}
2311
2312static int
2313rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2314 struct rocker_group_tbl_entry *entry)
2315{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002316 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002317 ROCKER_GROUP_PORT_GET(entry->group_id)))
2318 return -EMSGSIZE;
2319 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2320 entry->l2_interface.pop_vlan))
2321 return -EMSGSIZE;
2322
2323 return 0;
2324}
2325
2326static int
2327rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002328 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002329{
2330 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2331 entry->l2_rewrite.group_id))
2332 return -EMSGSIZE;
2333 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2334 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2335 ETH_ALEN, entry->l2_rewrite.eth_src))
2336 return -EMSGSIZE;
2337 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2338 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2339 ETH_ALEN, entry->l2_rewrite.eth_dst))
2340 return -EMSGSIZE;
2341 if (entry->l2_rewrite.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002342 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2343 entry->l2_rewrite.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002344 return -EMSGSIZE;
2345
2346 return 0;
2347}
2348
2349static int
2350rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002351 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002352{
2353 int i;
2354 struct rocker_tlv *group_ids;
2355
2356 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2357 entry->group_count))
2358 return -EMSGSIZE;
2359
2360 group_ids = rocker_tlv_nest_start(desc_info,
2361 ROCKER_TLV_OF_DPA_GROUP_IDS);
2362 if (!group_ids)
2363 return -EMSGSIZE;
2364
2365 for (i = 0; i < entry->group_count; i++)
2366 /* Note TLV array is 1-based */
2367 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2368 return -EMSGSIZE;
2369
2370 rocker_tlv_nest_end(desc_info, group_ids);
2371
2372 return 0;
2373}
2374
2375static int
2376rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002377 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002378{
2379 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2380 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2381 ETH_ALEN, entry->l3_unicast.eth_src))
2382 return -EMSGSIZE;
2383 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2384 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2385 ETH_ALEN, entry->l3_unicast.eth_dst))
2386 return -EMSGSIZE;
2387 if (entry->l3_unicast.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002388 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2389 entry->l3_unicast.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002390 return -EMSGSIZE;
2391 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2392 entry->l3_unicast.ttl_check))
2393 return -EMSGSIZE;
2394 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2395 entry->l3_unicast.group_id))
2396 return -EMSGSIZE;
2397
2398 return 0;
2399}
2400
Simon Horman534ba6a2015-06-01 13:25:04 +09002401static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002402 struct rocker_desc_info *desc_info,
2403 void *priv)
2404{
2405 struct rocker_group_tbl_entry *entry = priv;
2406 struct rocker_tlv *cmd_info;
2407 int err = 0;
2408
2409 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2410 return -EMSGSIZE;
2411 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2412 if (!cmd_info)
2413 return -EMSGSIZE;
2414
2415 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2416 entry->group_id))
2417 return -EMSGSIZE;
2418
2419 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2420 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2421 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2422 break;
2423 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2424 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2425 break;
2426 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2427 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2428 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2429 break;
2430 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2431 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2432 break;
2433 default:
2434 err = -ENOTSUPP;
2435 break;
2436 }
2437
2438 if (err)
2439 return err;
2440
2441 rocker_tlv_nest_end(desc_info, cmd_info);
2442
2443 return 0;
2444}
2445
Simon Horman534ba6a2015-06-01 13:25:04 +09002446static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002447 struct rocker_desc_info *desc_info,
2448 void *priv)
2449{
2450 const struct rocker_group_tbl_entry *entry = priv;
2451 struct rocker_tlv *cmd_info;
2452
2453 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2454 return -EMSGSIZE;
2455 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2456 if (!cmd_info)
2457 return -EMSGSIZE;
2458 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2459 entry->group_id))
2460 return -EMSGSIZE;
2461 rocker_tlv_nest_end(desc_info, cmd_info);
2462
2463 return 0;
2464}
2465
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002466/***************************************************
2467 * Flow, group, FDB, internal VLAN and neigh tables
2468 ***************************************************/
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002469
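/* Each of the five tables below is a hashtable.h hash table guarded by
 * its own spinlock, taken with the irqsave variants so table updates are
 * safe against the device's event path. Flow entries hash on a crc32 of
 * their key; group entries hash on their group_id.
 */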
2470static int rocker_init_tbls(struct rocker *rocker)
2471{
2472 hash_init(rocker->flow_tbl);
2473 spin_lock_init(&rocker->flow_tbl_lock);
2474
2475 hash_init(rocker->group_tbl);
2476 spin_lock_init(&rocker->group_tbl_lock);
2477
2478 hash_init(rocker->fdb_tbl);
2479 spin_lock_init(&rocker->fdb_tbl_lock);
2480
2481 hash_init(rocker->internal_vlan_tbl);
2482 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2483
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002484 hash_init(rocker->neigh_tbl);
2485 spin_lock_init(&rocker->neigh_tbl_lock);
2486
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002487 return 0;
2488}
2489
2490static void rocker_free_tbls(struct rocker *rocker)
2491{
2492 unsigned long flags;
2493 struct rocker_flow_tbl_entry *flow_entry;
2494 struct rocker_group_tbl_entry *group_entry;
2495 struct rocker_fdb_tbl_entry *fdb_entry;
2496 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002497 struct rocker_neigh_tbl_entry *neigh_entry;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002498 struct hlist_node *tmp;
2499 int bkt;
2500
2501 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2502 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2503 hash_del(&flow_entry->entry);
2504 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2505
2506 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2507 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2508 hash_del(&group_entry->entry);
2509 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2510
2511 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2512 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2513 hash_del(&fdb_entry->entry);
2514 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2515
2516 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2517 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2518 tmp, internal_vlan_entry, entry)
2519 hash_del(&internal_vlan_entry->entry);
2520 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002521
2522 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2523 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2524 hash_del(&neigh_entry->entry);
2525 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002526}
2527
2528static struct rocker_flow_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002529rocker_flow_tbl_find(const struct rocker *rocker,
2530 const struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002531{
2532 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002533 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002534
2535 hash_for_each_possible(rocker->flow_tbl, found,
2536 entry, match->key_crc32) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002537 if (memcmp(&found->key, &match->key, key_len) == 0)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002538 return found;
2539 }
2540
2541 return NULL;
2542}
2543
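/* Add-or-modify: if an entry with an identical key already exists, its
 * cookie is inherited and the command becomes OF_DPA_FLOW_MOD; otherwise
 * a fresh cookie is assigned and OF_DPA_FLOW_ADD is sent. During the
 * switchdev prepare phase neither the hash table nor the device is
 * touched, so the commit phase can replay the same call for real.
 */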
2544static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002545 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002546 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002547{
2548 struct rocker *rocker = rocker_port->rocker;
2549 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002550 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002551 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002552
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002553 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002554
Scott Feldman179f9a22015-06-12 21:35:46 -07002555 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002556
2557 found = rocker_flow_tbl_find(rocker, match);
2558
2559 if (found) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002560 match->cookie = found->cookie;
Jiri Pirko76c6f942015-09-24 10:02:44 +02002561 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002562 hash_del(&found->entry);
Jiri Pirkob15edf82016-02-16 15:14:39 +01002563 rocker_kfree(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002564 found = match;
2565 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002566 } else {
2567 found = match;
2568 found->cookie = rocker->flow_tbl_next_cookie++;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002569 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002570 }
2571
Jiri Pirko76c6f942015-09-24 10:02:44 +02002572 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002573 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002574
Scott Feldman179f9a22015-06-12 21:35:46 -07002575 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002576
Jiri Pirkoae3907e2016-02-16 15:14:48 +01002577 if (!switchdev_trans_ph_prepare(trans))
2578 return rocker_cmd_exec(rocker_port, flags,
2579 rocker_cmd_flow_tbl_add,
2580 found, NULL, NULL);
2581 return 0;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002582}
2583
2584static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002585 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002586 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002587{
2588 struct rocker *rocker = rocker_port->rocker;
2589 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002590 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002591 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002592 int err = 0;
2593
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002594 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002595
Scott Feldman179f9a22015-06-12 21:35:46 -07002596 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002597
2598 found = rocker_flow_tbl_find(rocker, match);
2599
2600 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002601 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002602 hash_del(&found->entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002603 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002604 }
2605
Scott Feldman179f9a22015-06-12 21:35:46 -07002606 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002607
Jiri Pirkob15edf82016-02-16 15:14:39 +01002608 rocker_kfree(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002609
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002610 if (found) {
Jiri Pirkoae3907e2016-02-16 15:14:48 +01002611 if (!switchdev_trans_ph_prepare(trans))
2612 err = rocker_cmd_exec(rocker_port, flags,
2613 rocker_cmd_flow_tbl_del,
2614 found, NULL, NULL);
Jiri Pirkob15edf82016-02-16 15:14:39 +01002615 rocker_kfree(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002616 }
2617
2618 return err;
2619}
2620
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002621static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002622 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002623 struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002624{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002625 if (flags & ROCKER_OP_FLAG_REMOVE)
Jiri Pirko76c6f942015-09-24 10:02:44 +02002626 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002627 else
Jiri Pirko76c6f942015-09-24 10:02:44 +02002628 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002629}
2630
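/* Each helper below fills in one table's key and hands it to
 * rocker_flow_tbl_do(), which routes on ROCKER_OP_FLAG_REMOVE, so a
 * single call site can both install and remove an entry. Usage sketch
 * (argument values illustrative only):
 *
 *	err = rocker_flow_tbl_ig_port(rocker_port, trans,
 *				      flags | ROCKER_OP_FLAG_REMOVE,
 *				      in_pport, in_pport_mask,
 *				      ROCKER_OF_DPA_TABLE_ID_VLAN);
 */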
2631static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002632 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002633 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002634 enum rocker_of_dpa_table_id goto_tbl)
2635{
2636 struct rocker_flow_tbl_entry *entry;
2637
Jiri Pirkob15edf82016-02-16 15:14:39 +01002638 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002639 if (!entry)
2640 return -ENOMEM;
2641
2642 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2643 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002644 entry->key.ig_port.in_pport = in_pport;
2645 entry->key.ig_port.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002646 entry->key.ig_port.goto_tbl = goto_tbl;
2647
Jiri Pirko76c6f942015-09-24 10:02:44 +02002648 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002649}
2650
2651static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002652 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002653 u32 in_pport, __be16 vlan_id,
2654 __be16 vlan_id_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002655 enum rocker_of_dpa_table_id goto_tbl,
2656 bool untagged, __be16 new_vlan_id)
2657{
2658 struct rocker_flow_tbl_entry *entry;
2659
Jiri Pirkob15edf82016-02-16 15:14:39 +01002660 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002661 if (!entry)
2662 return -ENOMEM;
2663
2664 entry->key.priority = ROCKER_PRIORITY_VLAN;
2665 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002666 entry->key.vlan.in_pport = in_pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002667 entry->key.vlan.vlan_id = vlan_id;
2668 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2669 entry->key.vlan.goto_tbl = goto_tbl;
2670
2671 entry->key.vlan.untagged = untagged;
2672 entry->key.vlan.new_vlan_id = new_vlan_id;
2673
Jiri Pirko76c6f942015-09-24 10:02:44 +02002674 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002675}
2676
2677static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002678 struct switchdev_trans *trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002679 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002680 __be16 eth_type, const u8 *eth_dst,
2681 const u8 *eth_dst_mask, __be16 vlan_id,
2682 __be16 vlan_id_mask, bool copy_to_cpu,
2683 int flags)
2684{
2685 struct rocker_flow_tbl_entry *entry;
2686
Jiri Pirkob15edf82016-02-16 15:14:39 +01002687 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002688 if (!entry)
2689 return -ENOMEM;
2690
2691 if (is_multicast_ether_addr(eth_dst)) {
2692 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2693 entry->key.term_mac.goto_tbl =
2694 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2695 } else {
2696 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2697 entry->key.term_mac.goto_tbl =
2698 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2699 }
2700
2701 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002702 entry->key.term_mac.in_pport = in_pport;
2703 entry->key.term_mac.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002704 entry->key.term_mac.eth_type = eth_type;
2705 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2706 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2707 entry->key.term_mac.vlan_id = vlan_id;
2708 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2709 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2710
Jiri Pirko76c6f942015-09-24 10:02:44 +02002711 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002712}
2713
2714static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002715 struct switchdev_trans *trans, int flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002716 const u8 *eth_dst, const u8 *eth_dst_mask,
2717 __be16 vlan_id, u32 tunnel_id,
2718 enum rocker_of_dpa_table_id goto_tbl,
2719 u32 group_id, bool copy_to_cpu)
2720{
2721 struct rocker_flow_tbl_entry *entry;
2722 u32 priority;
2723 bool vlan_bridging = !!vlan_id;
 2724 bool dflt = !eth_dst || eth_dst_mask;
2725 bool wild = false;
2726
Jiri Pirkob15edf82016-02-16 15:14:39 +01002727 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002728 if (!entry)
2729 return -ENOMEM;
2730
2731 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2732
2733 if (eth_dst) {
2734 entry->key.bridge.has_eth_dst = 1;
2735 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2736 }
2737 if (eth_dst_mask) {
2738 entry->key.bridge.has_eth_dst_mask = 1;
2739 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
Simon Horman629161f2015-04-30 15:21:29 +09002740 if (!ether_addr_equal(eth_dst_mask, ff_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002741 wild = true;
2742 }
2743
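	/* Priority is derived from three properties of the key:
	 * vlan_bridging (a VLAN id is present), dflt (no eth_dst, or an
	 * eth_dst qualified by a mask) and wild (the mask is not
	 * all-ones).
	 */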
2744 priority = ROCKER_PRIORITY_UNKNOWN;
Thomas Graf51ace882014-11-28 14:34:32 +01002745 if (vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002746 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002747 else if (vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002748 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002749 else if (vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002750 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
Thomas Graf51ace882014-11-28 14:34:32 +01002751 else if (!vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002752 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002753 else if (!vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002754 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002755 else if (!vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002756 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2757
2758 entry->key.priority = priority;
2759 entry->key.bridge.vlan_id = vlan_id;
2760 entry->key.bridge.tunnel_id = tunnel_id;
2761 entry->key.bridge.goto_tbl = goto_tbl;
2762 entry->key.bridge.group_id = group_id;
2763 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2764
Jiri Pirko76c6f942015-09-24 10:02:44 +02002765 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002766}
2767
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002768static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002769 struct switchdev_trans *trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002770 __be16 eth_type, __be32 dst,
2771 __be32 dst_mask, u32 priority,
2772 enum rocker_of_dpa_table_id goto_tbl,
2773 u32 group_id, int flags)
2774{
2775 struct rocker_flow_tbl_entry *entry;
2776
Jiri Pirkob15edf82016-02-16 15:14:39 +01002777 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002778 if (!entry)
2779 return -ENOMEM;
2780
2781 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2782 entry->key.priority = priority;
2783 entry->key.ucast_routing.eth_type = eth_type;
2784 entry->key.ucast_routing.dst4 = dst;
2785 entry->key.ucast_routing.dst4_mask = dst_mask;
2786 entry->key.ucast_routing.goto_tbl = goto_tbl;
2787 entry->key.ucast_routing.group_id = group_id;
2788 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2789 ucast_routing.group_id);
2790
Jiri Pirko76c6f942015-09-24 10:02:44 +02002791 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002792}
2793
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002794static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002795 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002796 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002797 const u8 *eth_src, const u8 *eth_src_mask,
2798 const u8 *eth_dst, const u8 *eth_dst_mask,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002799 __be16 eth_type, __be16 vlan_id,
2800 __be16 vlan_id_mask, u8 ip_proto,
2801 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002802 u32 group_id)
2803{
2804 u32 priority;
2805 struct rocker_flow_tbl_entry *entry;
2806
Jiri Pirkob15edf82016-02-16 15:14:39 +01002807 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002808 if (!entry)
2809 return -ENOMEM;
2810
2811 priority = ROCKER_PRIORITY_ACL_NORMAL;
2812 if (eth_dst && eth_dst_mask) {
Simon Horman629161f2015-04-30 15:21:29 +09002813 if (ether_addr_equal(eth_dst_mask, mcast_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002814 priority = ROCKER_PRIORITY_ACL_DFLT;
2815 else if (is_link_local_ether_addr(eth_dst))
2816 priority = ROCKER_PRIORITY_ACL_CTRL;
2817 }
2818
2819 entry->key.priority = priority;
2820 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002821 entry->key.acl.in_pport = in_pport;
2822 entry->key.acl.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002823
2824 if (eth_src)
2825 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2826 if (eth_src_mask)
2827 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2828 if (eth_dst)
2829 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2830 if (eth_dst_mask)
2831 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2832
2833 entry->key.acl.eth_type = eth_type;
2834 entry->key.acl.vlan_id = vlan_id;
2835 entry->key.acl.vlan_id_mask = vlan_id_mask;
2836 entry->key.acl.ip_proto = ip_proto;
2837 entry->key.acl.ip_proto_mask = ip_proto_mask;
2838 entry->key.acl.ip_tos = ip_tos;
2839 entry->key.acl.ip_tos_mask = ip_tos_mask;
2840 entry->key.acl.group_id = group_id;
2841
Jiri Pirko76c6f942015-09-24 10:02:44 +02002842 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002843}
2844
2845static struct rocker_group_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002846rocker_group_tbl_find(const struct rocker *rocker,
2847 const struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002848{
2849 struct rocker_group_tbl_entry *found;
2850
2851 hash_for_each_possible(rocker->group_tbl, found,
2852 entry, match->group_id) {
2853 if (found->group_id == match->group_id)
2854 return found;
2855 }
2856
2857 return NULL;
2858}
2859
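/* L2 flood and multicast entries own a separately allocated group_ids
 * array (see rocker_cmd_group_tbl_add_group_ids() above), so entry
 * freeing is type-aware.
 */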
Jiri Pirko76c6f942015-09-24 10:02:44 +02002860static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002861 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002862{
2863 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2864 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2865 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
Jiri Pirkob15edf82016-02-16 15:14:39 +01002866 rocker_kfree(trans, entry->group_ids);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002867 break;
2868 default:
2869 break;
2870 }
Jiri Pirkob15edf82016-02-16 15:14:39 +01002871 rocker_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002872}
2873
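/* Add (or replace) a group table entry. If an entry with the same
 * group_id already exists, it is freed and replaced, and the command
 * sent to the device becomes GROUP_MOD rather than GROUP_ADD. During
 * the switchdev prepare phase the hash table is left untouched and no
 * command is issued.
 */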
2874static int rocker_group_tbl_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002875 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002876 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002877{
2878 struct rocker *rocker = rocker_port->rocker;
2879 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002880 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002881
Scott Feldman179f9a22015-06-12 21:35:46 -07002882 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002883
2884 found = rocker_group_tbl_find(rocker, match);
2885
2886 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002887 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002888 hash_del(&found->entry);
Jiri Pirko76c6f942015-09-24 10:02:44 +02002889 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002890 found = match;
2891 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2892 } else {
2893 found = match;
2894 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2895 }
2896
Jiri Pirko76c6f942015-09-24 10:02:44 +02002897 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002898 hash_add(rocker->group_tbl, &found->entry, found->group_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002899
Scott Feldman179f9a22015-06-12 21:35:46 -07002900 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002901
Jiri Pirkoae3907e2016-02-16 15:14:48 +01002902 if (!switchdev_trans_ph_prepare(trans))
2903 return rocker_cmd_exec(rocker_port, flags,
2904 rocker_cmd_group_tbl_add,
2905 found, NULL, NULL);
2906 return 0;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002907}
2908
2909static int rocker_group_tbl_del(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002910 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002911 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002912{
2913 struct rocker *rocker = rocker_port->rocker;
2914 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002915 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002916 int err = 0;
2917
Scott Feldman179f9a22015-06-12 21:35:46 -07002918 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002919
2920 found = rocker_group_tbl_find(rocker, match);
2921
2922 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002923 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002924 hash_del(&found->entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002925 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2926 }
2927
Scott Feldman179f9a22015-06-12 21:35:46 -07002928 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002929
Jiri Pirko76c6f942015-09-24 10:02:44 +02002930 rocker_group_tbl_entry_free(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002931
2932 if (found) {
Jiri Pirkoae3907e2016-02-16 15:14:48 +01002933 if (!switchdev_trans_ph_prepare(trans))
2934 err = rocker_cmd_exec(rocker_port, flags,
2935 rocker_cmd_group_tbl_del,
2936 found, NULL, NULL);
Jiri Pirko76c6f942015-09-24 10:02:44 +02002937 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002938 }
2939
2940 return err;
2941}
2942
2943static int rocker_group_tbl_do(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002944 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002945 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002946{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002947 if (flags & ROCKER_OP_FLAG_REMOVE)
Jiri Pirko76c6f942015-09-24 10:02:44 +02002948 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002949 else
Jiri Pirko76c6f942015-09-24 10:02:44 +02002950 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002951}
2952
2953static int rocker_group_l2_interface(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002954 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002955 __be16 vlan_id, u32 out_pport,
2956 int pop_vlan)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002957{
2958 struct rocker_group_tbl_entry *entry;
2959
Jiri Pirkob15edf82016-02-16 15:14:39 +01002960 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002961 if (!entry)
2962 return -ENOMEM;
2963
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002964 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002965 entry->l2_interface.pop_vlan = pop_vlan;
2966
Jiri Pirko76c6f942015-09-24 10:02:44 +02002967 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002968}
2969
2970static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002971 struct switchdev_trans *trans,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002972 int flags, u8 group_count,
Simon Hormane5054642015-05-25 14:28:36 +09002973 const u32 *group_ids, u32 group_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002974{
2975 struct rocker_group_tbl_entry *entry;
2976
Jiri Pirkob15edf82016-02-16 15:14:39 +01002977 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002978 if (!entry)
2979 return -ENOMEM;
2980
2981 entry->group_id = group_id;
2982 entry->group_count = group_count;
2983
Jiri Pirkob15edf82016-02-16 15:14:39 +01002984 entry->group_ids = rocker_kcalloc(trans, flags,
2985 group_count, sizeof(u32));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002986 if (!entry->group_ids) {
Jiri Pirkob15edf82016-02-16 15:14:39 +01002987 rocker_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002988 return -ENOMEM;
2989 }
2990 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2991
Jiri Pirko76c6f942015-09-24 10:02:44 +02002992 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002993}
2994
2995static int rocker_group_l2_flood(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002996 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002997 __be16 vlan_id, u8 group_count,
Simon Hormane5054642015-05-25 14:28:36 +09002998 const u32 *group_ids, u32 group_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002999{
Jiri Pirko76c6f942015-09-24 10:02:44 +02003000 return rocker_group_l2_fan_out(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003001 group_count, group_ids,
3002 group_id);
3003}
3004
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003005static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003006 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003007 u32 index, const u8 *src_mac, const u8 *dst_mac,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003008 __be16 vlan_id, bool ttl_check, u32 pport)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003009{
3010 struct rocker_group_tbl_entry *entry;
3011
Jiri Pirkob15edf82016-02-16 15:14:39 +01003012 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003013 if (!entry)
3014 return -ENOMEM;
3015
3016 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
3017 if (src_mac)
3018 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
3019 if (dst_mac)
3020 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
3021 entry->l3_unicast.vlan_id = vlan_id;
3022 entry->l3_unicast.ttl_check = ttl_check;
3023 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
3024
Jiri Pirko76c6f942015-09-24 10:02:44 +02003025 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003026}
3027
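/* Neighbor table entries are hashed on the IPv4 address and shared:
 * the /32 neigh route and every nexthop that resolves to the same
 * address each hold one reference.
 */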
3028static struct rocker_neigh_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09003029rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003030{
3031 struct rocker_neigh_tbl_entry *found;
3032
Scott Feldman0f43deb2015-03-06 15:54:51 -08003033 hash_for_each_possible(rocker->neigh_tbl, found,
3034 entry, be32_to_cpu(ip_addr))
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003035 if (found->ip_addr == ip_addr)
3036 return found;
3037
3038 return NULL;
3039}
3040
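/* Callers of the _rocker_neigh_{add,del,update} helpers must hold
 * rocker->neigh_tbl_lock. The entry index is allocated outside the
 * commit phase, so it is already stable by commit time, while hash
 * and ref-count updates are skipped during prepare.
 */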
3041static void _rocker_neigh_add(struct rocker *rocker,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003042 struct switchdev_trans *trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003043 struct rocker_neigh_tbl_entry *entry)
3044{
Jiri Pirko76c6f942015-09-24 10:02:44 +02003045 if (!switchdev_trans_ph_commit(trans))
Scott Feldman4d81db42015-06-12 21:24:40 -07003046 entry->index = rocker->neigh_tbl_next_index++;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003047 if (switchdev_trans_ph_prepare(trans))
Simon Horman550ecc92015-05-21 12:40:16 +09003048 return;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003049 entry->ref_count++;
Scott Feldman0f43deb2015-03-06 15:54:51 -08003050 hash_add(rocker->neigh_tbl, &entry->entry,
3051 be32_to_cpu(entry->ip_addr));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003052}
3053
Jiri Pirko76c6f942015-09-24 10:02:44 +02003054static void _rocker_neigh_del(struct switchdev_trans *trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003055 struct rocker_neigh_tbl_entry *entry)
3056{
Jiri Pirko76c6f942015-09-24 10:02:44 +02003057 if (switchdev_trans_ph_prepare(trans))
Simon Horman550ecc92015-05-21 12:40:16 +09003058 return;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003059 if (--entry->ref_count == 0) {
3060 hash_del(&entry->entry);
Jiri Pirkob15edf82016-02-16 15:14:39 +01003061 rocker_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003062 }
3063}
3064
Scott Feldmanc4f20322015-05-10 09:47:50 -07003065static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003066 struct switchdev_trans *trans,
Simon Hormane5054642015-05-25 14:28:36 +09003067 const u8 *eth_dst, bool ttl_check)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003068{
3069 if (eth_dst) {
3070 ether_addr_copy(entry->eth_dst, eth_dst);
3071 entry->ttl_check = ttl_check;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003072 } else if (!switchdev_trans_ph_prepare(trans)) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003073 entry->ref_count++;
3074 }
3075}
3076
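/* Install, update or remove the L3 unicast group and /32 route for a
 * resolved neighbor. The add/update/remove decision is made under
 * neigh_tbl_lock by comparing the requested op against what is
 * already in the table.
 */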
3077static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003078 struct switchdev_trans *trans,
Simon Hormane5054642015-05-25 14:28:36 +09003079 int flags, __be32 ip_addr, const u8 *eth_dst)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003080{
3081 struct rocker *rocker = rocker_port->rocker;
3082 struct rocker_neigh_tbl_entry *entry;
3083 struct rocker_neigh_tbl_entry *found;
3084 unsigned long lock_flags;
3085 __be16 eth_type = htons(ETH_P_IP);
3086 enum rocker_of_dpa_table_id goto_tbl =
3087 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3088 u32 group_id;
3089 u32 priority = 0;
3090 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3091 bool updating;
3092 bool removing;
3093 int err = 0;
3094
Jiri Pirkob15edf82016-02-16 15:14:39 +01003095 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003096 if (!entry)
3097 return -ENOMEM;
3098
3099 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3100
3101 found = rocker_neigh_tbl_find(rocker, ip_addr);
3102
3103 updating = found && adding;
3104 removing = found && !adding;
3105 adding = !found && adding;
3106
3107 if (adding) {
3108 entry->ip_addr = ip_addr;
3109 entry->dev = rocker_port->dev;
3110 ether_addr_copy(entry->eth_dst, eth_dst);
3111 entry->ttl_check = true;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003112 _rocker_neigh_add(rocker, trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003113 } else if (removing) {
3114 memcpy(entry, found, sizeof(*entry));
Jiri Pirko76c6f942015-09-24 10:02:44 +02003115 _rocker_neigh_del(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003116 } else if (updating) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003117 _rocker_neigh_update(found, trans, eth_dst, true);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003118 memcpy(entry, found, sizeof(*entry));
3119 } else {
3120 err = -ENOENT;
3121 }
3122
3123 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3124
3125 if (err)
3126 goto err_out;
3127
3128 /* For each active neighbor, we have an L3 unicast group and
3129 * a /32 route to the neighbor, which uses the L3 unicast
3130 * group. The L3 unicast group can also be referred to by
3131 * other routes' nexthops.
3132 */
3133
Jiri Pirko76c6f942015-09-24 10:02:44 +02003134 err = rocker_group_l3_unicast(rocker_port, trans, flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003135 entry->index,
3136 rocker_port->dev->dev_addr,
3137 entry->eth_dst,
3138 rocker_port->internal_vlan_id,
3139 entry->ttl_check,
3140 rocker_port->pport);
3141 if (err) {
3142 netdev_err(rocker_port->dev,
3143 "Error (%d) L3 unicast group index %d\n",
3144 err, entry->index);
3145 goto err_out;
3146 }
3147
3148 if (adding || removing) {
3149 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003150 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003151 eth_type, ip_addr,
3152 inet_make_mask(32),
3153 priority, goto_tbl,
3154 group_id, flags);
3155
3156 if (err)
3157 netdev_err(rocker_port->dev,
3158 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3159 err, &entry->ip_addr, group_id);
3160 }
3161
3162err_out:
3163 if (!adding)
Jiri Pirkob15edf82016-02-16 15:14:39 +01003164 rocker_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003165
3166 return err;
3167}
3168
3169static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003170 struct switchdev_trans *trans,
3171 __be32 ip_addr)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003172{
3173 struct net_device *dev = rocker_port->dev;
Scott Feldman0f43deb2015-03-06 15:54:51 -08003174 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003175 int err = 0;
3176
Ying Xue4133fc02015-05-15 12:53:21 +08003177 if (!n) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003178 n = neigh_create(&arp_tbl, &ip_addr, dev);
Ying Xue4133fc02015-05-15 12:53:21 +08003179 if (IS_ERR(n))
 3180			return PTR_ERR(n);
3181 }
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003182
3183 /* If the neigh is already resolved, then go ahead and
3184 * install the entry, otherwise start the ARP process to
3185 * resolve the neigh.
3186 */
3187
3188 if (n->nud_state & NUD_VALID)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003189 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003190 ip_addr, n->ha);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003191 else
3192 neigh_event_send(n, NULL);
3193
Ying Xue4133fc02015-05-15 12:53:21 +08003194 neigh_release(n);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003195 return err;
3196}
3197
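/* Look up (or create) the neigh table entry for a nexthop and return
 * its L3 unicast group index via *index. If the address is not yet
 * resolved, kick off ARP resolution; the L3 group is filled in later
 * when the neigh update arrives.
 */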
Scott Feldmanc4f20322015-05-10 09:47:50 -07003198static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003199 struct switchdev_trans *trans, int flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003200 __be32 ip_addr, u32 *index)
3201{
3202 struct rocker *rocker = rocker_port->rocker;
3203 struct rocker_neigh_tbl_entry *entry;
3204 struct rocker_neigh_tbl_entry *found;
3205 unsigned long lock_flags;
3206 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3207 bool updating;
3208 bool removing;
3209 bool resolved = true;
3210 int err = 0;
3211
Jiri Pirkob15edf82016-02-16 15:14:39 +01003212 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003213 if (!entry)
3214 return -ENOMEM;
3215
3216 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3217
3218 found = rocker_neigh_tbl_find(rocker, ip_addr);
3219 if (found)
3220 *index = found->index;
3221
3222 updating = found && adding;
3223 removing = found && !adding;
3224 adding = !found && adding;
3225
3226 if (adding) {
3227 entry->ip_addr = ip_addr;
3228 entry->dev = rocker_port->dev;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003229 _rocker_neigh_add(rocker, trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003230 *index = entry->index;
3231 resolved = false;
3232 } else if (removing) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003233 _rocker_neigh_del(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003234 } else if (updating) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003235 _rocker_neigh_update(found, trans, NULL, false);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003236 resolved = !is_zero_ether_addr(found->eth_dst);
3237 } else {
3238 err = -ENOENT;
3239 }
3240
3241 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3242
3243 if (!adding)
Jiri Pirkob15edf82016-02-16 15:14:39 +01003244 rocker_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003245
3246 if (err)
3247 return err;
3248
3249 /* Resolved means neigh ip_addr is resolved to neigh mac. */
3250
3251 if (!resolved)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003252 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003253
3254 return err;
3255}
3256
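/* Rebuild the per-VLAN L2 flood group from the current set of bridged
 * ports that are members of the VLAN.
 */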
Scott Feldman6c707942014-11-28 14:34:28 +01003257static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003258 struct switchdev_trans *trans,
Scott Feldman6c707942014-11-28 14:34:28 +01003259 int flags, __be16 vlan_id)
3260{
3261 struct rocker_port *p;
Simon Hormane5054642015-05-25 14:28:36 +09003262 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman6c707942014-11-28 14:34:28 +01003263 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
Scott Feldman04f49fa2015-03-15 23:04:46 -07003264 u32 *group_ids;
Scott Feldman6c707942014-11-28 14:34:28 +01003265 u8 group_count = 0;
Scott Feldman04f49fa2015-03-15 23:04:46 -07003266 int err = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01003267 int i;
3268
Jiri Pirkob15edf82016-02-16 15:14:39 +01003269 group_ids = rocker_kcalloc(trans, flags,
3270 rocker->port_count, sizeof(u32));
Scott Feldman04f49fa2015-03-15 23:04:46 -07003271 if (!group_ids)
3272 return -ENOMEM;
3273
Scott Feldman6c707942014-11-28 14:34:28 +01003274 /* Adjust the flood group for this VLAN. The flood group
3275 * references an L2 interface group for each port in this
3276 * VLAN.
3277 */
3278
3279 for (i = 0; i < rocker->port_count; i++) {
3280 p = rocker->ports[i];
Scott Feldmanbcfd7802015-06-01 11:39:04 -07003281 if (!p)
3282 continue;
Scott Feldman6c707942014-11-28 14:34:28 +01003283 if (!rocker_port_is_bridged(p))
3284 continue;
3285 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3286 group_ids[group_count++] =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003287 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003288 }
3289 }
3290
3291 /* If there are no bridged ports in this VLAN, we're done */
3292 if (group_count == 0)
Scott Feldman04f49fa2015-03-15 23:04:46 -07003293 goto no_ports_in_vlan;
Scott Feldman6c707942014-11-28 14:34:28 +01003294
Jiri Pirko76c6f942015-09-24 10:02:44 +02003295 err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003296 group_count, group_ids, group_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003297 if (err)
3298 netdev_err(rocker_port->dev,
3299 "Error (%d) port VLAN l2 flood group\n", err);
3300
Scott Feldman04f49fa2015-03-15 23:04:46 -07003301no_ports_in_vlan:
Jiri Pirkob15edf82016-02-16 15:14:39 +01003302 rocker_kfree(trans, group_ids);
Scott Feldman6c707942014-11-28 14:34:28 +01003303 return err;
3304}
3305
3306static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003307 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003308 __be16 vlan_id, bool pop_vlan)
Scott Feldman6c707942014-11-28 14:34:28 +01003309{
Simon Hormane5054642015-05-25 14:28:36 +09003310 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman6c707942014-11-28 14:34:28 +01003311 struct rocker_port *p;
3312 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003313 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003314 int ref = 0;
3315 int err;
3316 int i;
3317
3318 /* An L2 interface group for this port in this VLAN, but
3319 * only when port STP state is LEARNING|FORWARDING.
3320 */
3321
3322 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3323 rocker_port->stp_state == BR_STATE_FORWARDING) {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003324 out_pport = rocker_port->pport;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003325 err = rocker_group_l2_interface(rocker_port, trans, flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003326 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003327 if (err) {
3328 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003329 "Error (%d) port VLAN l2 group for pport %d\n",
3330 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003331 return err;
3332 }
3333 }
3334
3335 /* An L2 interface group for this VLAN to CPU port.
3336 * Add when first port joins this VLAN and destroy when
3337 * last port leaves this VLAN.
3338 */
3339
3340 for (i = 0; i < rocker->port_count; i++) {
3341 p = rocker->ports[i];
Scott Feldmanbcfd7802015-06-01 11:39:04 -07003342 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
Scott Feldman6c707942014-11-28 14:34:28 +01003343 ref++;
3344 }
3345
3346 if ((!adding || ref != 1) && (adding || ref != 0))
3347 return 0;
3348
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003349 out_pport = 0;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003350 err = rocker_group_l2_interface(rocker_port, trans, flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003351 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003352 if (err) {
3353 netdev_err(rocker_port->dev,
3354 "Error (%d) port VLAN l2 group for CPU port\n", err);
3355 return err;
3356 }
3357
3358 return 0;
3359}
3360
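/* Table of control-plane traffic classes and how each is trapped: via
 * an ACL entry, a bridging (flood) entry, or a termination-MAC entry,
 * optionally copying the packet to the CPU.
 */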
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003361static struct rocker_ctrl {
3362 const u8 *eth_dst;
3363 const u8 *eth_dst_mask;
Jiri Pirko11e6c652014-12-03 14:14:54 +01003364 __be16 eth_type;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003365 bool acl;
3366 bool bridge;
3367 bool term;
3368 bool copy_to_cpu;
3369} rocker_ctrls[] = {
3370 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3371 /* pass link local multicast pkts up to CPU for filtering */
3372 .eth_dst = ll_mac,
3373 .eth_dst_mask = ll_mask,
3374 .acl = true,
3375 },
3376 [ROCKER_CTRL_LOCAL_ARP] = {
3377 /* pass local ARP pkts up to CPU */
3378 .eth_dst = zero_mac,
3379 .eth_dst_mask = zero_mac,
3380 .eth_type = htons(ETH_P_ARP),
3381 .acl = true,
3382 },
3383 [ROCKER_CTRL_IPV4_MCAST] = {
3384 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3385 .eth_dst = ipv4_mcast,
3386 .eth_dst_mask = ipv4_mask,
3387 .eth_type = htons(ETH_P_IP),
3388 .term = true,
3389 .copy_to_cpu = true,
3390 },
3391 [ROCKER_CTRL_IPV6_MCAST] = {
3392 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3393 .eth_dst = ipv6_mcast,
3394 .eth_dst_mask = ipv6_mask,
3395 .eth_type = htons(ETH_P_IPV6),
3396 .term = true,
3397 .copy_to_cpu = true,
3398 },
3399 [ROCKER_CTRL_DFLT_BRIDGING] = {
3400 /* flood any pkts on vlan */
3401 .bridge = true,
3402 .copy_to_cpu = true,
3403 },
Simon Horman82549732015-07-16 10:39:14 +09003404 [ROCKER_CTRL_DFLT_OVS] = {
3405 /* pass all pkts up to CPU */
3406 .eth_dst = zero_mac,
3407 .eth_dst_mask = zero_mac,
3408 .acl = true,
3409 },
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003410};
3411
3412static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003413 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003414 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003415{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003416 u32 in_pport = rocker_port->pport;
3417 u32 in_pport_mask = 0xffffffff;
3418 u32 out_pport = 0;
Simon Hormane5054642015-05-25 14:28:36 +09003419 const u8 *eth_src = NULL;
3420 const u8 *eth_src_mask = NULL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003421 __be16 vlan_id_mask = htons(0xffff);
3422 u8 ip_proto = 0;
3423 u8 ip_proto_mask = 0;
3424 u8 ip_tos = 0;
3425 u8 ip_tos_mask = 0;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003426 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003427 int err;
3428
Jiri Pirko76c6f942015-09-24 10:02:44 +02003429 err = rocker_flow_tbl_acl(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003430 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003431 eth_src, eth_src_mask,
3432 ctrl->eth_dst, ctrl->eth_dst_mask,
3433 ctrl->eth_type,
3434 vlan_id, vlan_id_mask,
3435 ip_proto, ip_proto_mask,
3436 ip_tos, ip_tos_mask,
3437 group_id);
3438
3439 if (err)
3440 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3441
3442 return err;
3443}
3444
Scott Feldman6c707942014-11-28 14:34:28 +01003445static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003446 struct switchdev_trans *trans,
3447 int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003448 const struct rocker_ctrl *ctrl,
Scott Feldman6c707942014-11-28 14:34:28 +01003449 __be16 vlan_id)
3450{
3451 enum rocker_of_dpa_table_id goto_tbl =
3452 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3453 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3454 u32 tunnel_id = 0;
3455 int err;
3456
3457 if (!rocker_port_is_bridged(rocker_port))
3458 return 0;
3459
Jiri Pirko76c6f942015-09-24 10:02:44 +02003460 err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003461 ctrl->eth_dst, ctrl->eth_dst_mask,
3462 vlan_id, tunnel_id,
3463 goto_tbl, group_id, ctrl->copy_to_cpu);
3464
3465 if (err)
3466 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3467
3468 return err;
3469}
3470
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003471static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003472 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003473 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003474{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003475 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003476 __be16 vlan_id_mask = htons(0xffff);
3477 int err;
3478
3479 if (ntohs(vlan_id) == 0)
3480 vlan_id = rocker_port->internal_vlan_id;
3481
Jiri Pirko76c6f942015-09-24 10:02:44 +02003482 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003483 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003484 ctrl->eth_type, ctrl->eth_dst,
3485 ctrl->eth_dst_mask, vlan_id,
3486 vlan_id_mask, ctrl->copy_to_cpu,
3487 flags);
3488
3489 if (err)
3490 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3491
3492 return err;
3493}
3494
Scott Feldmanc4f20322015-05-10 09:47:50 -07003495static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003496 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003497 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003498{
3499 if (ctrl->acl)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003500 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003501 ctrl, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003502 if (ctrl->bridge)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003503 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003504 ctrl, vlan_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003505
3506 if (ctrl->term)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003507 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003508 ctrl, vlan_id);
3509
3510 return -EOPNOTSUPP;
3511}
3512
3513static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003514 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003515 __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003516{
3517 int err = 0;
3518 int i;
3519
3520 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3521 if (rocker_port->ctrls[i]) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003522 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003523 &rocker_ctrls[i], vlan_id);
3524 if (err)
3525 return err;
3526 }
3527 }
3528
3529 return err;
3530}
3531
Scott Feldmanc4f20322015-05-10 09:47:50 -07003532static int rocker_port_ctrl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003533 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003534 const struct rocker_ctrl *ctrl)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003535{
3536 u16 vid;
3537 int err = 0;
3538
3539 for (vid = 1; vid < VLAN_N_VID; vid++) {
3540 if (!test_bit(vid, rocker_port->vlan_bitmap))
3541 continue;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003542 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003543 ctrl, htons(vid));
3544 if (err)
3545 break;
3546 }
3547
3548 return err;
3549}
3550
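/* Add or remove a VLAN on a port: flip the port's VLAN bitmap, program
 * the per-VLAN ctrl traps (on add), the L2 interface/flood groups and
 * the VLAN table entry. During the prepare phase the bitmap flip is
 * undone on exit, so the commit phase applies it for real.
 */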
Scott Feldmanc4f20322015-05-10 09:47:50 -07003551static int rocker_port_vlan(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003552 struct switchdev_trans *trans, int flags, u16 vid)
Scott Feldman6c707942014-11-28 14:34:28 +01003553{
3554 enum rocker_of_dpa_table_id goto_tbl =
3555 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003556 u32 in_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003557 __be16 vlan_id = htons(vid);
3558 __be16 vlan_id_mask = htons(0xffff);
3559 __be16 internal_vlan_id;
3560 bool untagged;
3561 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3562 int err;
3563
3564 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3565
Scott Feldman9228ad22015-05-10 09:47:54 -07003566 if (adding && test_bit(ntohs(internal_vlan_id),
3567 rocker_port->vlan_bitmap))
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01003568 return 0; /* already added */
Scott Feldman9228ad22015-05-10 09:47:54 -07003569 else if (!adding && !test_bit(ntohs(internal_vlan_id),
3570 rocker_port->vlan_bitmap))
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01003571 return 0; /* already removed */
Scott Feldman6c707942014-11-28 14:34:28 +01003572
Scott Feldman9228ad22015-05-10 09:47:54 -07003573 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3574
Scott Feldman6c707942014-11-28 14:34:28 +01003575 if (adding) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003576 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003577 internal_vlan_id);
3578 if (err) {
3579 netdev_err(rocker_port->dev,
3580 "Error (%d) port ctrl vlan add\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003581 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003582 }
3583 }
3584
Jiri Pirko76c6f942015-09-24 10:02:44 +02003585 err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003586 internal_vlan_id, untagged);
3587 if (err) {
3588 netdev_err(rocker_port->dev,
3589 "Error (%d) port VLAN l2 groups\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003590 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003591 }
3592
Jiri Pirko76c6f942015-09-24 10:02:44 +02003593 err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003594 internal_vlan_id);
3595 if (err) {
3596 netdev_err(rocker_port->dev,
3597 "Error (%d) port VLAN l2 flood group\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003598 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003599 }
3600
Jiri Pirko76c6f942015-09-24 10:02:44 +02003601 err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003602 in_pport, vlan_id, vlan_id_mask,
Scott Feldman6c707942014-11-28 14:34:28 +01003603 goto_tbl, untagged, internal_vlan_id);
3604 if (err)
3605 netdev_err(rocker_port->dev,
3606 "Error (%d) port VLAN table\n", err);
3607
Scott Feldman9228ad22015-05-10 09:47:54 -07003608err_out:
Jiri Pirko76c6f942015-09-24 10:02:44 +02003609 if (switchdev_trans_ph_prepare(trans))
Scott Feldman9228ad22015-05-10 09:47:54 -07003610 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3611
Scott Feldman6c707942014-11-28 14:34:28 +01003612 return err;
3613}
3614
Scott Feldmanc4f20322015-05-10 09:47:50 -07003615static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003616 struct switchdev_trans *trans, int flags)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003617{
3618 enum rocker_of_dpa_table_id goto_tbl;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003619 u32 in_pport;
3620 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003621 int err;
3622
3623 /* Normal Ethernet Frames. Matches pkts from any local physical
3624 * ports. Goto VLAN tbl.
3625 */
3626
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003627 in_pport = 0;
3628 in_pport_mask = 0xffff0000;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003629 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3630
Jiri Pirko76c6f942015-09-24 10:02:44 +02003631 err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003632 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003633 goto_tbl);
3634 if (err)
3635 netdev_err(rocker_port->dev,
3636 "Error (%d) ingress port table entry\n", err);
3637
3638 return err;
3639}
3640
Scott Feldman6c707942014-11-28 14:34:28 +01003641struct rocker_fdb_learn_work {
3642 struct work_struct work;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003643 struct rocker_port *rocker_port;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003644 struct switchdev_trans *trans;
Scott Feldman6c707942014-11-28 14:34:28 +01003645 int flags;
3646 u8 addr[ETH_ALEN];
3647 u16 vid;
3648};
3649
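/* Learned FDB entries are pushed to the bridge via switchdev
 * notifiers, which must run under rtnl; defer to a work item since
 * learning can happen in contexts where rtnl cannot be taken
 * directly.
 */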
3650static void rocker_port_fdb_learn_work(struct work_struct *work)
3651{
Simon Hormane5054642015-05-25 14:28:36 +09003652 const struct rocker_fdb_learn_work *lw =
Scott Feldman6c707942014-11-28 14:34:28 +01003653 container_of(work, struct rocker_fdb_learn_work, work);
3654 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3655 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003656 struct switchdev_notifier_fdb_info info;
Jiri Pirko3aeb6612015-01-15 23:49:37 +01003657
3658 info.addr = lw->addr;
3659 info.vid = lw->vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003660
Ido Schimmel4f2c6ae2016-01-27 15:16:43 +01003661 rtnl_lock();
Thomas Graf51ace882014-11-28 14:34:32 +01003662 if (learned && removing)
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003663 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003664 lw->rocker_port->dev, &info.info);
Thomas Graf51ace882014-11-28 14:34:32 +01003665 else if (learned && !removing)
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003666 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003667 lw->rocker_port->dev, &info.info);
Ido Schimmel4f2c6ae2016-01-27 15:16:43 +01003668 rtnl_unlock();
Scott Feldman6c707942014-11-28 14:34:28 +01003669
Jiri Pirkob15edf82016-02-16 15:14:39 +01003670 rocker_kfree(lw->trans, work);
Scott Feldman6c707942014-11-28 14:34:28 +01003671}
3672
3673static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003674 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003675 const u8 *addr, __be16 vlan_id)
Scott Feldman6c707942014-11-28 14:34:28 +01003676{
3677 struct rocker_fdb_learn_work *lw;
3678 enum rocker_of_dpa_table_id goto_tbl =
3679 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003680 u32 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003681 u32 tunnel_id = 0;
3682 u32 group_id = ROCKER_GROUP_NONE;
Scott Feldman5111f802014-11-28 14:34:30 +01003683 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
Scott Feldman6c707942014-11-28 14:34:28 +01003684 bool copy_to_cpu = false;
3685 int err;
3686
3687 if (rocker_port_is_bridged(rocker_port))
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003688 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003689
3690 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003691 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003692 NULL, vlan_id, tunnel_id, goto_tbl,
Scott Feldman6c707942014-11-28 14:34:28 +01003693 group_id, copy_to_cpu);
3694 if (err)
3695 return err;
3696 }
3697
Scott Feldman5111f802014-11-28 14:34:30 +01003698 if (!syncing)
3699 return 0;
3700
Scott Feldman6c707942014-11-28 14:34:28 +01003701 if (!rocker_port_is_bridged(rocker_port))
3702 return 0;
3703
Jiri Pirkob15edf82016-02-16 15:14:39 +01003704 lw = rocker_kzalloc(trans, flags, sizeof(*lw));
Scott Feldman6c707942014-11-28 14:34:28 +01003705 if (!lw)
3706 return -ENOMEM;
3707
3708 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3709
Scott Feldmanc4f20322015-05-10 09:47:50 -07003710 lw->rocker_port = rocker_port;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003711 lw->trans = trans;
Scott Feldman6c707942014-11-28 14:34:28 +01003712 lw->flags = flags;
3713 ether_addr_copy(lw->addr, addr);
3714 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3715
Jiri Pirko76c6f942015-09-24 10:02:44 +02003716 if (switchdev_trans_ph_prepare(trans))
Jiri Pirkob15edf82016-02-16 15:14:39 +01003717 rocker_kfree(trans, lw);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003718 else
3719 schedule_work(&lw->work);
Scott Feldman6c707942014-11-28 14:34:28 +01003720
3721 return 0;
3722}
3723
3724static struct rocker_fdb_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09003725rocker_fdb_tbl_find(const struct rocker *rocker,
3726 const struct rocker_fdb_tbl_entry *match)
Scott Feldman6c707942014-11-28 14:34:28 +01003727{
3728 struct rocker_fdb_tbl_entry *found;
3729
3730 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3731 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3732 return found;
3733
3734 return NULL;
3735}
3736
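/* Add, refresh or remove an FDB entry. Adding an entry that already
 * exists is turned into a refresh, so the ageing timestamp is updated
 * without reprogramming the flow.
 */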
3737static int rocker_port_fdb(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003738 struct switchdev_trans *trans,
Scott Feldman6c707942014-11-28 14:34:28 +01003739 const unsigned char *addr,
3740 __be16 vlan_id, int flags)
3741{
3742 struct rocker *rocker = rocker_port->rocker;
3743 struct rocker_fdb_tbl_entry *fdb;
3744 struct rocker_fdb_tbl_entry *found;
3745 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3746 unsigned long lock_flags;
3747
Jiri Pirkob15edf82016-02-16 15:14:39 +01003748 fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
Scott Feldman6c707942014-11-28 14:34:28 +01003749 if (!fdb)
3750 return -ENOMEM;
3751
3752 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
Scott Feldmana471be42015-09-23 08:39:14 -07003753 fdb->touched = jiffies;
Scott Feldman4c660492015-09-23 08:39:15 -07003754 fdb->key.rocker_port = rocker_port;
Scott Feldman6c707942014-11-28 14:34:28 +01003755 ether_addr_copy(fdb->key.addr, addr);
3756 fdb->key.vlan_id = vlan_id;
3757 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3758
3759 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3760
3761 found = rocker_fdb_tbl_find(rocker, fdb);
3762
Scott Feldmana471be42015-09-23 08:39:14 -07003763 if (found) {
3764 found->touched = jiffies;
3765 if (removing) {
Jiri Pirkob15edf82016-02-16 15:14:39 +01003766 rocker_kfree(trans, fdb);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003767 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmana471be42015-09-23 08:39:14 -07003768 hash_del(&found->entry);
3769 }
3770 } else if (!removing) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003771 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmana471be42015-09-23 08:39:14 -07003772 hash_add(rocker->fdb_tbl, &fdb->entry,
3773 fdb->key_crc32);
Scott Feldman6c707942014-11-28 14:34:28 +01003774 }
3775
3776 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3777
3778 /* Check if adding and already exists, or removing and can't find */
3779 if (!found != !removing) {
Jiri Pirkob15edf82016-02-16 15:14:39 +01003780 rocker_kfree(trans, fdb);
Scott Feldman6c707942014-11-28 14:34:28 +01003781 if (!found && removing)
3782 return 0;
3783 /* Refreshing existing to update aging timers */
3784 flags |= ROCKER_OP_FLAG_REFRESH;
3785 }
3786
Jiri Pirko76c6f942015-09-24 10:02:44 +02003787 return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003788}
3789
Scott Feldmanc4f20322015-05-10 09:47:50 -07003790static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003791 struct switchdev_trans *trans, int flags)
Scott Feldman6c707942014-11-28 14:34:28 +01003792{
3793 struct rocker *rocker = rocker_port->rocker;
3794 struct rocker_fdb_tbl_entry *found;
3795 unsigned long lock_flags;
Scott Feldman6c707942014-11-28 14:34:28 +01003796 struct hlist_node *tmp;
3797 int bkt;
3798 int err = 0;
3799
3800 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3801 rocker_port->stp_state == BR_STATE_FORWARDING)
3802 return 0;
3803
Jiri Pirkod33eeb62015-10-14 19:40:54 +02003804 flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
Scott Feldman179f9a22015-06-12 21:35:46 -07003805
Scott Feldman6c707942014-11-28 14:34:28 +01003806 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3807
3808 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4c660492015-09-23 08:39:15 -07003809 if (found->key.rocker_port != rocker_port)
Scott Feldman6c707942014-11-28 14:34:28 +01003810 continue;
3811 if (!found->learned)
3812 continue;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003813 err = rocker_port_fdb_learn(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003814 found->key.addr,
3815 found->key.vlan_id);
3816 if (err)
3817 goto err_out;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003818 if (!switchdev_trans_ph_prepare(trans))
Simon Horman3098ac32015-05-21 12:40:14 +09003819 hash_del(&found->entry);
Scott Feldman6c707942014-11-28 14:34:28 +01003820 }
3821
3822err_out:
3823 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3824
3825 return err;
3826}
3827
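/* Timer callback that ages out learned FDB entries: walk the table,
 * expire entries whose port ageing_time has elapsed, and re-arm the
 * timer for the earliest remaining expiry.
 */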
Scott Feldman52fe3e22015-09-23 08:39:18 -07003828static void rocker_fdb_cleanup(unsigned long data)
3829{
3830 struct rocker *rocker = (struct rocker *)data;
3831 struct rocker_port *rocker_port;
3832 struct rocker_fdb_tbl_entry *entry;
3833 struct hlist_node *tmp;
3834 unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
3835 unsigned long expires;
3836 unsigned long lock_flags;
3837 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3838 ROCKER_OP_FLAG_LEARNED;
3839 int bkt;
3840
3841 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3842
3843 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3844 if (!entry->learned)
3845 continue;
3846 rocker_port = entry->key.rocker_port;
3847 expires = entry->touched + rocker_port->ageing_time;
3848 if (time_before_eq(expires, jiffies)) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003849 rocker_port_fdb_learn(rocker_port, NULL,
Scott Feldman52fe3e22015-09-23 08:39:18 -07003850 flags, entry->key.addr,
3851 entry->key.vlan_id);
3852 hash_del(&entry->entry);
3853 } else if (time_before(expires, next_timer)) {
3854 next_timer = expires;
3855 }
3856 }
3857
3858 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3859
3860 mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
3861}
3862
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003863static int rocker_port_router_mac(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003864 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003865 __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003866{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003867 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003868 __be16 eth_type;
3869 const u8 *dst_mac_mask = ff_mac;
3870 __be16 vlan_id_mask = htons(0xffff);
3871 bool copy_to_cpu = false;
3872 int err;
3873
3874 if (ntohs(vlan_id) == 0)
3875 vlan_id = rocker_port->internal_vlan_id;
3876
3877 eth_type = htons(ETH_P_IP);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003878 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003879 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003880 eth_type, rocker_port->dev->dev_addr,
3881 dst_mac_mask, vlan_id, vlan_id_mask,
3882 copy_to_cpu, flags);
3883 if (err)
3884 return err;
3885
3886 eth_type = htons(ETH_P_IPV6);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003887 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003888 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003889 eth_type, rocker_port->dev->dev_addr,
3890 dst_mac_mask, vlan_id, vlan_id_mask,
3891 copy_to_cpu, flags);
3892
3893 return err;
3894}
3895
Scott Feldmanc4f20322015-05-10 09:47:50 -07003896static int rocker_port_fwding(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003897 struct switchdev_trans *trans, int flags)
Scott Feldman6c707942014-11-28 14:34:28 +01003898{
3899 bool pop_vlan;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003900 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003901 __be16 vlan_id;
3902 u16 vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003903 int err;
3904
3905 /* Port will be forwarding-enabled if its STP state is LEARNING
3906 * or FORWARDING. Traffic from CPU can still egress, regardless of
3907 * port STP state. Use L2 interface group on port VLANs as a way
3908 * to toggle port forwarding: if forwarding is disabled, L2
3909 * interface group will not exist.
3910 */
3911
3912 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3913 rocker_port->stp_state != BR_STATE_FORWARDING)
3914 flags |= ROCKER_OP_FLAG_REMOVE;
3915
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003916 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003917 for (vid = 1; vid < VLAN_N_VID; vid++) {
3918 if (!test_bit(vid, rocker_port->vlan_bitmap))
3919 continue;
3920 vlan_id = htons(vid);
3921 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003922 err = rocker_group_l2_interface(rocker_port, trans, flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003923 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003924 if (err) {
3925 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003926 "Error (%d) port VLAN l2 group for pport %d\n",
3927 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003928 return err;
3929 }
3930 }
3931
3932 return 0;
3933}
3934
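/* Apply an STP state change: (re)program the ctrl traps wanted in the
 * new state, flush learned FDB entries, and toggle forwarding via the
 * port's L2 interface groups. During the prepare phase the saved ctrl
 * and STP state are restored on exit, so the commit phase re-applies
 * the change for real.
 */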
Scott Feldmanc4f20322015-05-10 09:47:50 -07003935static int rocker_port_stp_update(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003936 struct switchdev_trans *trans, int flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07003937 u8 state)
Scott Feldman6c707942014-11-28 14:34:28 +01003938{
3939 bool want[ROCKER_CTRL_MAX] = { 0, };
Scott Feldmanc4f20322015-05-10 09:47:50 -07003940 bool prev_ctrls[ROCKER_CTRL_MAX];
Jiri Pirko76c6f942015-09-24 10:02:44 +02003941 u8 uninitialized_var(prev_state);
Scott Feldman6c707942014-11-28 14:34:28 +01003942 int err;
3943 int i;
3944
Jiri Pirko76c6f942015-09-24 10:02:44 +02003945 if (switchdev_trans_ph_prepare(trans)) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003946 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3947 prev_state = rocker_port->stp_state;
3948 }
3949
Scott Feldman6c707942014-11-28 14:34:28 +01003950 if (rocker_port->stp_state == state)
3951 return 0;
3952
3953 rocker_port->stp_state = state;
3954
3955 switch (state) {
3956 case BR_STATE_DISABLED:
3957 /* port is completely disabled */
3958 break;
3959 case BR_STATE_LISTENING:
3960 case BR_STATE_BLOCKING:
3961 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3962 break;
3963 case BR_STATE_LEARNING:
3964 case BR_STATE_FORWARDING:
Simon Horman82549732015-07-16 10:39:14 +09003965 if (!rocker_port_is_ovsed(rocker_port))
3966 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
Scott Feldman6c707942014-11-28 14:34:28 +01003967 want[ROCKER_CTRL_IPV4_MCAST] = true;
3968 want[ROCKER_CTRL_IPV6_MCAST] = true;
3969 if (rocker_port_is_bridged(rocker_port))
3970 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
Simon Horman82549732015-07-16 10:39:14 +09003971 else if (rocker_port_is_ovsed(rocker_port))
3972 want[ROCKER_CTRL_DFLT_OVS] = true;
Scott Feldman6c707942014-11-28 14:34:28 +01003973 else
3974 want[ROCKER_CTRL_LOCAL_ARP] = true;
3975 break;
3976 }
3977
3978 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3979 if (want[i] != rocker_port->ctrls[i]) {
Scott Feldman179f9a22015-06-12 21:35:46 -07003980 int ctrl_flags = flags |
3981 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003982 err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003983 &rocker_ctrls[i]);
3984 if (err)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003985 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003986 rocker_port->ctrls[i] = want[i];
3987 }
3988 }
3989
Jiri Pirko76c6f942015-09-24 10:02:44 +02003990 err = rocker_port_fdb_flush(rocker_port, trans, flags);
Scott Feldman6c707942014-11-28 14:34:28 +01003991 if (err)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003992 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003993
Jiri Pirko76c6f942015-09-24 10:02:44 +02003994 err = rocker_port_fwding(rocker_port, trans, flags);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003995
3996err_out:
Jiri Pirko76c6f942015-09-24 10:02:44 +02003997 if (switchdev_trans_ph_prepare(trans)) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003998 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3999 rocker_port->stp_state = prev_state;
4000 }
4001
4002 return err;
Scott Feldman6c707942014-11-28 14:34:28 +01004003}
4004
Scott Feldmanc4f20322015-05-10 09:47:50 -07004005static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004006 struct switchdev_trans *trans, int flags)
Scott Feldmane47172a2015-02-25 20:15:38 -08004007{
4008 if (rocker_port_is_bridged(rocker_port))
4009 /* bridge STP will enable port */
4010 return 0;
4011
4012 /* port is not bridged, so simulate going to FORWARDING state */
Jiri Pirko76c6f942015-09-24 10:02:44 +02004013 return rocker_port_stp_update(rocker_port, trans, flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07004014 BR_STATE_FORWARDING);
Scott Feldmane47172a2015-02-25 20:15:38 -08004015}
4016
Scott Feldmanc4f20322015-05-10 09:47:50 -07004017static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004018 struct switchdev_trans *trans, int flags)
Scott Feldmane47172a2015-02-25 20:15:38 -08004019{
4020 if (rocker_port_is_bridged(rocker_port))
4021 /* bridge STP will disable port */
4022 return 0;
4023
4024 /* port is not bridged, so simulate going to DISABLED state */
Jiri Pirko76c6f942015-09-24 10:02:44 +02004025 return rocker_port_stp_update(rocker_port, trans, flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07004026 BR_STATE_DISABLED);
Scott Feldmane47172a2015-02-25 20:15:38 -08004027}
4028
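/* Untagged traffic on a port is mapped to an "internal" VLAN drawn
 * from a reserved range starting at ROCKER_INTERNAL_VLAN_ID_BASE;
 * entries are keyed by ifindex and ref-counted so multiple users can
 * share one ID.
 */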
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004029static struct rocker_internal_vlan_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09004030rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004031{
4032 struct rocker_internal_vlan_tbl_entry *found;
4033
4034 hash_for_each_possible(rocker->internal_vlan_tbl, found,
4035 entry, ifindex) {
4036 if (found->ifindex == ifindex)
4037 return found;
4038 }
4039
4040 return NULL;
4041}
4042
4043static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
4044 int ifindex)
4045{
4046 struct rocker *rocker = rocker_port->rocker;
4047 struct rocker_internal_vlan_tbl_entry *entry;
4048 struct rocker_internal_vlan_tbl_entry *found;
4049 unsigned long lock_flags;
4050 int i;
4051
Simon Hormandf6a2062015-05-21 12:40:17 +09004052 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004053 if (!entry)
4054 return 0;
4055
4056 entry->ifindex = ifindex;
4057
4058 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4059
4060 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4061 if (found) {
Simon Hormandf6a2062015-05-21 12:40:17 +09004062 kfree(entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004063 goto found;
4064 }
4065
4066 found = entry;
4067 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
4068
4069 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
4070 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
4071 continue;
4072 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
4073 goto found;
4074 }
4075
4076 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
4077
4078found:
4079 found->ref_count++;
4080 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4081
4082 return found->vlan_id;
4083}
4084
Simon Hormane5054642015-05-25 14:28:36 +09004085static void
4086rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
4087 int ifindex)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004088{
4089 struct rocker *rocker = rocker_port->rocker;
4090 struct rocker_internal_vlan_tbl_entry *found;
4091 unsigned long lock_flags;
4092 unsigned long bit;
4093
4094 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4095
4096 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4097 if (!found) {
4098 netdev_err(rocker_port->dev,
4099 "ifindex (%d) not found in internal VLAN tbl\n",
4100 ifindex);
4101 goto not_found;
4102 }
4103
4104 if (--found->ref_count <= 0) {
4105 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
4106 clear_bit(bit, rocker->internal_vlan_bitmap);
4107 hash_del(&found->entry);
Simon Hormandf6a2062015-05-21 12:40:17 +09004108 kfree(found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004109 }
4110
4111not_found:
4112 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4113}
4114
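/* Program an IPv4 route: if the nexthop has a gateway reachable via
 * this port, point the route at an L3 unicast group; otherwise send
 * matching packets to the CPU via the L2 interface group for pport 0.
 */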
static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, __be32 dst,
				int dst_len, const struct fib_info *fi,
				u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == rocker_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = rocker_port_ipv4_nh(rocker_port, trans, flags,
					  nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
					     dst_mask, priority, goto_tbl,
					     group_id, flags);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}

/*****************
 * Net device ops
 *****************/

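/* Bring-up order matters here: the DMA rings must exist before the
 * per-port TX/RX MSI-X vectors are requested, and forwarding is only
 * enabled once the port's "world" has been opened.  The error path
 * unwinds in exactly the reverse order.
 */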
static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_world_port_open(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot open port in world\n");
		goto err_world_port_open;
	}

	err = rocker_port_fwd_enable(rocker_port, NULL, 0);
	if (err)
		goto err_fwd_enable;

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	if (!dev->proto_down)
		rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_fwd_enable:
err_world_port_open:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}

static int rocker_port_stop(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	netif_stop_queue(dev);
	rocker_port_set_enable(rocker_port, false);
	napi_disable(&rocker_port->napi_rx);
	napi_disable(&rocker_port->napi_tx);
	rocker_world_port_stop(rocker_port);
	rocker_port_fwd_disable(rocker_port, NULL,
				ROCKER_OP_FLAG_NOWAIT);
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
	rocker_port_dma_rings_fini(rocker_port);

	return 0;
}

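/* A TX descriptor carries the packet as a ROCKER_TLV_TX_FRAGS nest,
 * where each ROCKER_TLV_TX_FRAG holds an ADDR/LEN attribute pair
 * describing one DMA-mapped fragment.  The unmap helper below walks
 * that nest to undo the mappings on completion or error.
 */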
static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
	struct rocker_tlv *attr;
	int rem;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_TX_FRAGS])
		return;
	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
		const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
		dma_addr_t dma_handle;
		size_t len;

		if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
			continue;
		rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
					attr);
		if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
		    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
			continue;
		dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
		len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
		pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
	}
}

static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       char *buf, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	struct rocker_tlv *frag;

	dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
		if (net_ratelimit())
			netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
	if (!frag)
		goto unmap_frag;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
			       dma_handle))
		goto nest_cancel;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
			       buf_len))
		goto nest_cancel;
	rocker_tlv_nest_end(desc_info, frag);
	return 0;

nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
	pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
	return -EMSGSIZE;
}

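/* Transmit path: map the linear head first, then each page fragment.
 * Skbs with more than ROCKER_TX_FRAGS_MAX fragments are linearized so
 * they fit in one descriptor.  If no descriptor remains after posting,
 * the queue is stopped; the TX completion NAPI poll wakes it again.
 */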
static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_tlv *frags;
	int i;
	int err;

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (unlikely(!desc_info)) {
		if (net_ratelimit())
			netdev_err(dev, "tx ring full when queue awake\n");
		return NETDEV_TX_BUSY;
	}

	rocker_desc_cookie_ptr_set(desc_info, skb);

	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
	if (!frags)
		goto out;
	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
					  skb->data, skb_headlen(skb));
	if (err)
		goto nest_cancel;
	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
		err = skb_linearize(skb);
		if (err)
			goto unmap_frags;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
						  skb_frag_address(frag),
						  skb_frag_size(frag));
		if (err)
			goto unmap_frags;
	}
	rocker_tlv_nest_end(desc_info, frags);

	rocker_desc_gen_clear(desc_info);
	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (!desc_info)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;

unmap_frags:
	rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frags);
out:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static int rocker_port_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

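/* MTU changes are pushed to the device with the port quiesced: stop
 * the port if it is running, program the new MTU over the cmd ring,
 * then reopen.  The 68..9000 bounds below are driver-enforced limits.
 */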
static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int running = netif_running(dev);
	int err;

#define ROCKER_PORT_MIN_MTU	68
#define ROCKER_PORT_MAX_MTU	9000

	if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
		return -EINVAL;

	if (running)
		rocker_port_stop(dev);

	netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
	if (err)
		return err;

	if (running)
		err = rocker_port_open(dev);

	return err;
}

static int rocker_port_get_phys_port_name(struct net_device *dev,
					  char *buf, size_t len)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct port_name name = { .buf = buf, .len = len };
	int err;

	err = rocker_cmd_exec(rocker_port, 0,
			      rocker_cmd_get_port_settings_prep, NULL,
			      rocker_cmd_get_port_settings_phys_name_proc,
			      &name);

	return err ? -EOPNOTSUPP : 0;
}

static int rocker_port_change_proto_down(struct net_device *dev,
					 bool proto_down)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_port->dev->flags & IFF_UP)
		rocker_port_set_enable(rocker_port, !proto_down);
	rocker_port->dev->proto_down = proto_down;
	return 0;
}

static void rocker_port_neigh_destroy(struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(n->dev);
	int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;
	int err;

	rocker_port_ipv4_neigh(rocker_port, NULL,
			       flags, ip_addr, n->ha);
	err = rocker_world_port_neigh_destroy(rocker_port, n);
	if (err)
		netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
			    err);
}

static const struct net_device_ops rocker_port_netdev_ops = {
	.ndo_open			= rocker_port_open,
	.ndo_stop			= rocker_port_stop,
	.ndo_start_xmit			= rocker_port_xmit,
	.ndo_set_mac_address		= rocker_port_set_mac_address,
	.ndo_change_mtu			= rocker_port_change_mtu,
	.ndo_bridge_getlink		= switchdev_port_bridge_getlink,
	.ndo_bridge_setlink		= switchdev_port_bridge_setlink,
	.ndo_bridge_dellink		= switchdev_port_bridge_dellink,
	.ndo_fdb_add			= switchdev_port_fdb_add,
	.ndo_fdb_del			= switchdev_port_fdb_del,
	.ndo_fdb_dump			= switchdev_port_fdb_dump,
	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
	.ndo_change_proto_down		= rocker_port_change_proto_down,
	.ndo_neigh_destroy		= rocker_port_neigh_destroy,
};

/********************
 * swdev interface
 ********************/

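/* switchdev attr/obj setters run in two phases: a prepare phase
 * (switchdev_trans_ph_prepare() is true) that must only validate and
 * reserve resources, and a commit phase that applies the change.
 * The helpers below check the phase before touching hardware state.
 */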
static int rocker_port_attr_get(struct net_device *dev,
				struct switchdev_attr *attr)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	const struct rocker *rocker = rocker_port->rocker;
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(rocker->hw.id);
		memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags = rocker_port->brport_flags;
		err = rocker_world_port_attr_bridge_flags_get(rocker_port,
							      &attr->u.brport_flags);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					unsigned long brport_flags)
{
	unsigned long orig_flags;
	int err = 0;

	orig_flags = rocker_port->brport_flags;
	rocker_port->brport_flags = brport_flags;
	if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(rocker_port,
					       !!(rocker_port->brport_flags & BR_LEARNING));

	if (switchdev_trans_ph_prepare(trans))
		rocker_port->brport_flags = orig_flags;

	return err;
}

static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
					  struct switchdev_trans *trans,
					  u32 ageing_time)
{
	if (!switchdev_trans_ph_prepare(trans)) {
		rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
		mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
	}

	return 0;
}

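/* Each set/add/del below is applied to the legacy OF-DPA tables first
 * and then mirrored to the port's "world" implementation; the first
 * error aborts the pair so both views stay consistent.
 */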
static int rocker_port_attr_set(struct net_device *dev,
				const struct switchdev_attr *attr,
				struct switchdev_trans *trans)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = rocker_port_stp_update(rocker_port, trans, 0,
					     attr->u.stp_state);
		if (err)
			break;
		err = rocker_world_port_attr_stp_state_set(rocker_port,
							   attr->u.stp_state,
							   trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = rocker_port_brport_flags_set(rocker_port, trans,
						   attr->u.brport_flags);
		if (err)
			break;
		err = rocker_world_port_attr_bridge_flags_set(rocker_port,
							      attr->u.brport_flags,
							      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = rocker_port_bridge_ageing_time(rocker_port, trans,
						     attr->u.ageing_time);
		if (err)
			break;
		err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
								    attr->u.ageing_time,
								    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int rocker_port_vlan_add(struct rocker_port *rocker_port,
				struct switchdev_trans *trans,
				u16 vid, u16 flags)
{
	int err;

	/* XXX deal with flags for PVID and untagged */

	err = rocker_port_vlan(rocker_port, trans, 0, vid);
	if (err)
		return err;

	err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
	if (err)
		rocker_port_vlan(rocker_port, trans,
				 ROCKER_OP_FLAG_REMOVE, vid);

	return err;
}

static int rocker_port_vlans_add(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans,
				 const struct switchdev_obj_port_vlan *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = rocker_port_vlan_add(rocker_port, trans,
					   vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}

static int rocker_port_fdb_add(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans,
			       const struct switchdev_obj_port_fdb *fdb)
{
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
	int flags = 0;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}

static int rocker_port_obj_add(struct net_device *dev,
			       const struct switchdev_obj *obj,
			       struct switchdev_trans *trans)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	const struct switchdev_obj_ipv4_fib *fib4;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlans_add(rocker_port, trans,
					    SWITCHDEV_OBJ_PORT_VLAN(obj));
		if (err)
			break;
		err = rocker_world_port_obj_vlan_add(rocker_port,
						     SWITCHDEV_OBJ_PORT_VLAN(obj),
						     trans);
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
		err = rocker_port_fib_ipv4(rocker_port, trans,
					   htonl(fib4->dst), fib4->dst_len,
					   &fib4->fi, fib4->tb_id, 0);
		if (err)
			break;
		err = rocker_world_port_obj_fib4_add(rocker_port,
						     SWITCHDEV_OBJ_IPV4_FIB(obj),
						     trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_add(rocker_port, trans,
					  SWITCHDEV_OBJ_PORT_FDB(obj));
		if (err)
			break;
		err = rocker_world_port_obj_fdb_add(rocker_port,
						    SWITCHDEV_OBJ_PORT_FDB(obj),
						    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int rocker_port_vlan_del(struct rocker_port *rocker_port,
				u16 vid, u16 flags)
{
	int err;

	err = rocker_port_router_mac(rocker_port, NULL,
				     ROCKER_OP_FLAG_REMOVE, htons(vid));
	if (err)
		return err;

	return rocker_port_vlan(rocker_port, NULL,
				ROCKER_OP_FLAG_REMOVE, vid);
}

static int rocker_port_vlans_del(struct rocker_port *rocker_port,
				 const struct switchdev_obj_port_vlan *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}

static int rocker_port_fdb_del(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans,
			       const struct switchdev_obj_port_fdb *fdb)
{
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
	int flags = ROCKER_OP_FLAG_REMOVE;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}

static int rocker_port_obj_del(struct net_device *dev,
			       const struct switchdev_obj *obj)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	const struct switchdev_obj_ipv4_fib *fib4;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlans_del(rocker_port,
					    SWITCHDEV_OBJ_PORT_VLAN(obj));
		if (err)
			break;
		err = rocker_world_port_obj_vlan_del(rocker_port,
						     SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
		err = rocker_port_fib_ipv4(rocker_port, NULL,
					   htonl(fib4->dst), fib4->dst_len,
					   &fib4->fi, fib4->tb_id,
					   ROCKER_OP_FLAG_REMOVE);
		if (err)
			break;
		err = rocker_world_port_obj_fib4_del(rocker_port,
						     SWITCHDEV_OBJ_IPV4_FIB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_del(rocker_port, NULL,
					  SWITCHDEV_OBJ_PORT_FDB(obj));
		if (err)
			break;
		err = rocker_world_port_obj_fdb_del(rocker_port,
						    SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

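/* Dump helpers iterate driver-private state (the FDB hash under its
 * spinlock, the per-port VLAN bitmap) and emit one switchdev obj per
 * entry through the provided callback, stopping on the first error.
 */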
static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
				struct switchdev_obj_port_fdb *fdb,
				switchdev_obj_dump_cb_t *cb)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	unsigned long lock_flags;
	int bkt;
	int err = 0;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		ether_addr_copy(fdb->addr, found->key.addr);
		fdb->ndm_state = NUD_REACHABLE;
		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
						   found->key.vlan_id);
		err = cb(&fdb->obj);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}

static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
				 struct switchdev_obj_port_vlan *vlan,
				 switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan->flags = 0;
		if (rocker_vlan_id_is_internal(htons(vid)))
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err;
}

static int rocker_port_obj_dump(struct net_device *dev,
				struct switchdev_obj *obj,
				switchdev_obj_dump_cb_t *cb)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = rocker_port_fdb_dump(rocker_port,
					   SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		if (err)
			break;
		err = rocker_world_port_obj_fdb_dump(rocker_port,
						     SWITCHDEV_OBJ_PORT_FDB(obj),
						     cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_port_vlan_dump(rocker_port,
					    SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		if (err)
			break;
		err = rocker_world_port_obj_vlan_dump(rocker_port,
						      SWITCHDEV_OBJ_PORT_VLAN(obj),
						      cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops rocker_port_switchdev_ops = {
	.switchdev_port_attr_get	= rocker_port_attr_get,
	.switchdev_port_attr_set	= rocker_port_attr_set,
	.switchdev_port_obj_add		= rocker_port_obj_add,
	.switchdev_port_obj_del		= rocker_port_obj_del,
	.switchdev_port_obj_dump	= rocker_port_obj_dump,
};

/********************
 * ethtool interface
 ********************/

static int rocker_port_get_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}

static int rocker_port_set_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}

static void rocker_port_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static struct rocker_port_stats {
	char str[ETH_GSTRING_LEN];
	int type;
} rocker_port_stats[] = {
	{ "rx_packets",	ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
	{ "rx_bytes",	ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
	{ "rx_dropped",	ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
	{ "rx_errors",	ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },

	{ "tx_packets",	ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
	{ "tx_bytes",	ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
	{ "tx_dropped",	ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
	{ "tx_errors",	ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
};

#define ROCKER_PORT_STATS_LEN	ARRAY_SIZE(rocker_port_stats)

static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
				    u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
			memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

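/* Port stats are fetched over the cmd descriptor ring: the _prep
 * callback builds a GET_PORT_STATS request keyed by pport, and the
 * _proc callback parses the reply TLVs into the ethtool data array
 * in rocker_port_stats[] order.
 */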
static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv)
{
	struct rocker_tlv *cmd_stats;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_stats)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, cmd_stats);

	return 0;
}

static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	const struct rocker_tlv *pattr;
	u32 pport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);

	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
		return -EIO;

	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
	if (pport != rocker_port->pport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;

		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}

static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port, 0,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv);
}

static void rocker_port_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
		int i;

		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
			data[i] = 0;
	}
}

static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ROCKER_PORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_settings		= rocker_port_get_settings,
	.set_settings		= rocker_port_set_settings,
	.get_drvinfo		= rocker_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= rocker_port_get_strings,
	.get_ethtool_stats	= rocker_port_get_stats,
	.get_sset_count		= rocker_port_get_sset_count,
};

/*****************
 * NAPI interface
 *****************/

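/* TX completion runs in NAPI context: reap finished descriptors,
 * unmap their fragments, account tx stats, free the skbs, and return
 * the consumed credits to the ring so the device can reuse them.
 * The queue is woken if it was stopped for lack of descriptors.
 */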
static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}

static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}

static int rocker_port_rx_proc(const struct rocker *rocker,
			       const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;
	u16 rx_flags = 0;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;
	if (attrs[ROCKER_TLV_RX_FLAGS])
		rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
		skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}

static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_rx);
}

static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	if (credits < budget)
		napi_complete(napi);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}

/*****************
 * PCI driver ops
 *****************/

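/* Initial carrier state is derived from the PORT_PHYS_LINK_STATUS
 * register, read as a 64-bit value and tested as a bitmap indexed by
 * the port's 1-based pport number.
 */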
static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
	bool link_up;

	link_up = link_status & (1 << rocker_port->pport);
	if (link_up)
		netif_carrier_on(rocker_port->dev);
	else
		netif_carrier_off(rocker_port->dev);
}

static void rocker_remove_ports(struct rocker *rocker)
{
	struct rocker_port *rocker_port;
	int i;

	for (i = 0; i < rocker->port_count; i++) {
		rocker_port = rocker->ports[i];
		if (!rocker_port)
			continue;
		rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
		rocker_world_port_fini(rocker_port);
		unregister_netdev(rocker_port->dev);
		rocker_world_port_post_fini(rocker_port);
		free_netdev(rocker_port->dev);
	}
	rocker_world_fini(rocker);
	kfree(rocker->ports);
}

static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
						   rocker_port->dev->dev_addr);
	if (err) {
		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
		eth_hw_addr_random(rocker_port->dev);
	}
}

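/* Per-port bring-up: allocate the netdev, wire up the ndo/ethtool/
 * switchdev ops and NAPI, register it, initialize its world, seed the
 * ingress port table, and give the port an internal VLAN plus the
 * untagged VLAN (vid 0).  Note pport is 1-based while the ports[]
 * array index is 0-based.
 */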
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u16 untagged_vid = 0;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;
	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
	rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;

	err = rocker_world_check_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "world init failed\n");
		goto err_world_check_init;
	}

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	dev->switchdev_ops = &rocker_port_switchdev_ops;
	netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
			  NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

	err = rocker_world_port_pre_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world pre-init failed\n");
		goto err_world_port_pre_init;
	}
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	err = rocker_world_port_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world init failed\n");
		goto err_world_port_init;
	}

	switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);

	rocker_port_set_learning(rocker_port,
				 !!(rocker_port->brport_flags & BR_LEARNING));

	err = rocker_port_ig_tbl(rocker_port, NULL, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install ig port table failed\n");
		goto err_port_ig_tbl;
	}

	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}

	return 0;

err_untagged_vlan:
	rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
	rocker_world_port_fini(rocker_port);
err_world_port_init:
	rocker->ports[port_number] = NULL;
	unregister_netdev(dev);
err_register_netdev:
	rocker_world_port_post_fini(rocker_port);
err_world_port_pre_init:
err_world_check_init:
	free_netdev(dev);
	return err;
}

static int rocker_probe_ports(struct rocker *rocker)
{
	int i;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
	rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
	for (i = 0; i < rocker->port_count; i++) {
		err = rocker_probe_port(rocker, i);
		if (err)
			goto remove_ports;
	}
	return 0;

remove_ports:
	rocker_remove_ports(rocker);
	return err;
}

static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}

static void rocker_msix_fini(const struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}

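/* Device probe: enable the PCI device, map BAR0, set the DMA mask
 * (64-bit with 32-bit fallback), bring up MSI-X (dedicated cmd and
 * event vectors plus tx/rx vectors per port), reset the device, then
 * create the rings, tables, FDB cleanup timer and port netdevs.
 */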
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	err = rocker_init_tbls(rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot init rocker tables\n");
		goto err_init_tbls;
	}

	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
		    (unsigned long) rocker);
	mod_timer(&rocker->fdb_cleanup_timer, jiffies);

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
		 (int)sizeof(rocker->hw.id), &rocker->hw.id);

	return 0;

err_probe_ports:
	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
err_init_tbls:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}

static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);

	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	rocker_remove_ports(rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}

static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};

/************************************
 * Net device notifier event handler
 ************************************/

static bool rocker_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}

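/* Rocker ports are recognized by their netdev_ops pointer.  When a
 * port joins a bridge it trades its own internal VLAN for the
 * bridge's, so the untagged VLAN (vid 0) is removed and re-added
 * around the switch; leaving reverses the exchange.
 */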
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	u16 untagged_vid = 0;
	int err;

	/* The port is joining a bridge, so its internal VLAN is going
	 * to change to the bridge's internal VLAN. Remove the untagged
	 * VLAN (vid=0) from the port and re-add it once the internal
	 * VLAN has changed.
	 */

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

	rocker_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);

	return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}

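/* Leaving a bridge reverses the join: the untagged VLAN is cycled while
 * the port falls back to its own internal VLAN, the fwd mark is cleared,
 * and forwarding is re-enabled if the port is administratively up.
 */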
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	u16 untagged_vid = 0;
	int err;

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);

	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
				    false);
	rocker_port->bridge_dev = NULL;

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}

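/* An OVS master change does not move the port's internal VLAN; forwarding
 * is simply toggled so the port's table entries are reinstalled to match
 * the new (or cleared) master.
 */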
static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
				   struct net_device *master)
{
	int err;

	rocker_port->bridge_dev = master;

	err = rocker_port_fwd_disable(rocker_port, NULL, 0);
	if (err)
		return err;
	err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}

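/* Dispatch CHANGEUPPER link/unlink to the bridge or OVS handler based on
 * the type of the master device.
 */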
static int rocker_port_master_linked(struct rocker_port *rocker_port,
				     struct net_device *master)
{
	int err = 0;

	if (netif_is_bridge_master(master))
		err = rocker_port_bridge_join(rocker_port, master);
	else if (netif_is_ovs_master(master))
		err = rocker_port_ovs_changed(rocker_port, master);
	return err;
}

static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
{
	int err = 0;

	if (rocker_port_is_bridged(rocker_port))
		err = rocker_port_bridge_leave(rocker_port);
	else if (rocker_port_is_ovsed(rocker_port))
		err = rocker_port_ovs_changed(rocker_port, NULL);
	return err;
}

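/* Both the world-ops path (rocker_world_port_master_*) and the legacy
 * path are notified of each master change; a failure in either is logged
 * but does not veto the event.
 */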
static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct rocker_port *rocker_port;
	int err;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;
		if (!info->master)
			goto out;
		rocker_port = netdev_priv(dev);
		if (info->linking) {
			err = rocker_world_port_master_linked(rocker_port,
							      info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
			err = rocker_port_master_linked(rocker_port,
							info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
		} else {
			err = rocker_world_port_master_unlinked(rocker_port,
								info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
			err = rocker_port_master_unlinked(rocker_port);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
		}
		break;
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};

/************************************
 * Net event notifier event handler
 ************************************/

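/* Neighbour updates can arrive in atomic context, so the command is
 * issued with ROCKER_OP_FLAG_NOWAIT; entries that are no longer
 * NUD_VALID are removed from the hardware rather than updated.
 */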
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
		    ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
}

static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct rocker_port *rocker_port;
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		rocker_port = netdev_priv(dev);
		err = rocker_world_port_neigh_update(rocker_port, n);
		if (err)
			netdev_warn(dev, "failed to handle neigh update (err %d)\n",
				    err);
		err = rocker_neigh_update(dev, n);
		if (err)
			netdev_warn(dev,
				    "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};

/***********************
 * Module init and exit
 ***********************/

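/* The notifiers are registered before the PCI driver so they are already
 * in place when rocker_probe() registers the port netdevs.
 */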
static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}

static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}

module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);