/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker_hw.h"
#include "rocker.h"
#include "rocker_tlv.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};

struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};

struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  A higher-priority
 * match takes precedence over a lower-priority match.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}
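
/* Untagged traffic (vid 0) is carried internally on the port's
 * internal VLAN: rocker_port_vid_to_vlan() substitutes
 * rocker_port->internal_vlan_id and sets *pop_vlan so the tag is
 * popped again on egress.  Internal VLAN IDs occupy the range
 * [ROCKER_INTERNAL_VLAN_ID_BASE, 0xffe], which is exactly what
 * rocker_vlan_id_is_internal() checks.
 */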

static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_ovs_master(rocker_port->bridge_dev);
}

#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)

static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
				size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction.  If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	return elem ? elem + 1 : NULL;
}
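
/* Illustrative sketch, not part of the driver: switchdev calls the
 * same modification routine twice, once in the prepare phase
 * (switchdev_trans_ph_prepare() is true) and once in the commit
 * phase.  Since both calls follow the same code path, an object
 * allocated here during prepare is dequeued and reused during commit
 * rather than re-allocated:
 *
 *	entry = rocker_kzalloc(trans, flags, sizeof(*entry));
 *	if (!entry)
 *		return -ENOMEM;		// only prepare can fail here
 *	...
 *	rocker_kfree(trans, entry);	// no-op while preparing
 */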

static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
			    size_t size)
{
	return __rocker_mem_alloc(trans, flags, size);
}

static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
			    size_t n, size_t size)
{
	return __rocker_mem_alloc(trans, flags, n * size);
}

static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}

struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(void)
{
	struct rocker_wait *wait;

	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait)
		return NULL;
	return wait;
}

static void rocker_wait_destroy(struct rocker_wait *wait)
{
	kfree(wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	if (!wait->done)
		return false;
	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}
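
/* A rocker_wait pairs a command descriptor with its completion: the
 * submitter initializes the wait, posts the descriptor, and sleeps in
 * rocker_wait_event_timeout(); the command-ring IRQ handler
 * (rocker_cmd_irq_handler() below) calls rocker_wait_wake_up() when
 * the device marks the descriptor done.
 */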

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}
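
/* The emulated device's TEST_REG and TEST_REG64 read back twice the
 * value last written to them, so the test writes a random value
 * (shifted down so the doubled result still fits the register width)
 * and expects rnd * 2 back; anything else means register access is
 * broken.
 */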
430
Simon Hormane5054642015-05-25 14:28:36 +0900431static int rocker_dma_test_one(const struct rocker *rocker,
432 struct rocker_wait *wait, u32 test_type,
433 dma_addr_t dma_handle, const unsigned char *buf,
434 const unsigned char *expect, size_t size)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100435{
Simon Hormane5054642015-05-25 14:28:36 +0900436 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100437 int i;
438
439 rocker_wait_reset(wait);
440 rocker_write32(rocker, TEST_DMA_CTRL, test_type);
441
442 if (!rocker_wait_event_timeout(wait, HZ / 10)) {
443 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
444 return -EIO;
445 }
446
447 for (i = 0; i < size; i++) {
448 if (buf[i] != expect[i]) {
449 dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
450 buf[i], i, expect[i]);
451 return -EIO;
452 }
453 }
454 return 0;
455}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}

/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}
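
/* Ring positions advance modulo the ring size; e.g. with limit 4 the
 * sequence is 0, 1, 2, 3, 0, ...  Head and tail both move with
 * __pos_inc(), so "head + 1 == tail" means the ring is full and
 * "head == tail" means it is empty.
 */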
589
Simon Hormane5054642015-05-25 14:28:36 +0900590static int rocker_desc_err(const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100591{
Scott Feldman7eb344f2015-02-25 20:15:36 -0800592 int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
593
594 switch (err) {
595 case ROCKER_OK:
596 return 0;
597 case -ROCKER_ENOENT:
598 return -ENOENT;
599 case -ROCKER_ENXIO:
600 return -ENXIO;
601 case -ROCKER_ENOMEM:
602 return -ENOMEM;
603 case -ROCKER_EEXIST:
604 return -EEXIST;
605 case -ROCKER_EINVAL:
606 return -EINVAL;
607 case -ROCKER_EMSGSIZE:
608 return -EMSGSIZE;
609 case -ROCKER_ENOTSUP:
610 return -EOPNOTSUPP;
611 case -ROCKER_ENOBUFS:
612 return -ENOBUFS;
613 }
614
615 return -EINVAL;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100616}
617
Simon Hormane5054642015-05-25 14:28:36 +0900618static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100619{
620 desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
621}
622
Simon Hormane5054642015-05-25 14:28:36 +0900623static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100624{
625 u32 comp_err = desc_info->desc->comp_err;
626
627 return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
628}
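
/* The generation bit doubles as an ownership flag: the device sets
 * ROCKER_DMA_DESC_COMP_ERR_GEN in comp_err when it has completed a
 * descriptor, and the driver clears it again with
 * rocker_desc_gen_clear() before handing the descriptor back.  A
 * descriptor whose gen bit is clear still belongs to the hardware.
 */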

static void *
rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
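
/* Ring sizes must be a power of two within the device's supported
 * range; e.g. a requested size of 100 is rounded up to 128 and then
 * clamped to [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX].
 * rocker_dma_ring_create() BUG()s if the caller did not pre-fix the
 * size this way.
 */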

static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}
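
/* Only size - 1 descriptors are handed to the hardware: one slot must
 * stay unused so that "head + 1 == tail" (full) remains distinguishable
 * from "head == tail" (empty).  The last descriptor is committed but
 * the head pointer is not advanced past it.
 */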
763
Simon Hormane5054642015-05-25 14:28:36 +0900764static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
765 const struct rocker_dma_ring_info *info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100766 int direction, size_t buf_size)
767{
768 struct pci_dev *pdev = rocker->pdev;
769 int i;
770 int err;
771
772 for (i = 0; i < info->size; i++) {
773 struct rocker_desc_info *desc_info = &info->desc_info[i];
774 struct rocker_desc *desc = &info->desc[i];
775 dma_addr_t dma_handle;
776 char *buf;
777
778 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
779 if (!buf) {
780 err = -ENOMEM;
781 goto rollback;
782 }
783
784 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
785 if (pci_dma_mapping_error(pdev, dma_handle)) {
786 kfree(buf);
787 err = -EIO;
788 goto rollback;
789 }
790
791 desc_info->data = buf;
792 desc_info->data_size = buf_size;
793 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
794
795 desc->buf_addr = dma_handle;
796 desc->buf_size = buf_size;
797 }
798 return 0;
799
800rollback:
801 for (i--; i >= 0; i--) {
Simon Hormane5054642015-05-25 14:28:36 +0900802 const struct rocker_desc_info *desc_info = &info->desc_info[i];
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100803
804 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
805 desc_info->data_size, direction);
806 kfree(desc_info->data);
807 }
808 return err;
809}
810
Simon Hormane5054642015-05-25 14:28:36 +0900811static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
812 const struct rocker_dma_ring_info *info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100813 int direction)
814{
815 struct pci_dev *pdev = rocker->pdev;
816 int i;
817
818 for (i = 0; i < info->size; i++) {
Simon Hormane5054642015-05-25 14:28:36 +0900819 const struct rocker_desc_info *desc_info = &info->desc_info[i];
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100820 struct rocker_desc *desc = &info->desc[i];
821
822 desc->buf_addr = 0;
823 desc->buf_size = 0;
824 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
825 desc_info->data_size, direction);
826 kfree(desc_info->data);
827 }
828}
829
Jiri Pirkoca0a5f22016-02-16 15:14:47 +0100830static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info)
831{
832 struct rocker_wait *wait;
833
834 wait = rocker_wait_create();
835 if (!wait)
836 return -ENOMEM;
837 rocker_desc_cookie_ptr_set(desc_info, wait);
838 return 0;
839}
840
841static void
842rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info)
843{
844 struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info);
845
846 rocker_wait_destroy(wait);
847}
848
849static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker)
850{
851 const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
852 int i;
853 int err;
854
855 for (i = 0; i < cmd_ring->size; i++) {
856 err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]);
857 if (err)
858 goto rollback;
859 }
860 return 0;
861
862rollback:
863 for (i--; i >= 0; i--)
864 rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
865 return err;
866}
867
868static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker)
869{
870 const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
871 int i;
872
873 for (i = 0; i < cmd_ring->size; i++)
874 rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
875}
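
/* Each command descriptor carries a pre-allocated rocker_wait in its
 * cookie for the life of the ring, so rocker_cmd_exec() never has to
 * allocate on the hot path; it simply re-initializes the wait found
 * on the descriptor it grabs.
 */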

static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_cmd_ring_waits_alloc(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring waits\n");
		goto err_dma_cmd_ring_waits_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_waits_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_FROMDEVICE);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_cmd_ring_waits_free(rocker);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
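
/* Receive buffers must hold a full frame for the current MTU: the L2
 * header, one VLAN tag and the FCS are added on top of the MTU, e.g.
 * 1500 + 14 + 4 + 4 = 1522 bytes for a standard Ethernet MTU.
 */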

static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}

static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
					 const struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}

static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}

static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(const struct rocker_port *rocker_port,
				   bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}

/********************************
 * Interrupt handler and helpers
 ********************************/

static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}

static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}

static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);
static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
					      __be16 vlan_id);

static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;
	int err;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	err = rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
	if (err)
		return err;

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}

static int rocker_event_process(const struct rocker *rocker,
				const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	const struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/
1331
Simon Horman534ba6a2015-06-01 13:25:04 +09001332typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001333 struct rocker_desc_info *desc_info,
1334 void *priv);
1335
Simon Horman534ba6a2015-06-01 13:25:04 +09001336typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001337 const struct rocker_desc_info *desc_info,
1338 void *priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001339
Simon Horman534ba6a2015-06-01 13:25:04 +09001340static int rocker_cmd_exec(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02001341 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09001342 rocker_cmd_prep_cb_t prepare, void *prepare_priv,
1343 rocker_cmd_proc_cb_t process, void *process_priv)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001344{
Simon Horman534ba6a2015-06-01 13:25:04 +09001345 struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001346 struct rocker_desc_info *desc_info;
1347 struct rocker_wait *wait;
Scott Feldman179f9a22015-06-12 21:35:46 -07001348 bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
1349 unsigned long lock_flags;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001350 int err;
1351
Scott Feldman179f9a22015-06-12 21:35:46 -07001352 spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
Scott Feldmanc4f20322015-05-10 09:47:50 -07001353
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001354 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1355 if (!desc_info) {
Scott Feldman179f9a22015-06-12 21:35:46 -07001356 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
Jiri Pirkoca0a5f22016-02-16 15:14:47 +01001357 return -EAGAIN;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001358 }
Scott Feldmanc4f20322015-05-10 09:47:50 -07001359
Jiri Pirkoca0a5f22016-02-16 15:14:47 +01001360 wait = rocker_desc_cookie_ptr_get(desc_info);
1361 rocker_wait_init(wait);
1362 wait->nowait = nowait;
1363
Simon Horman534ba6a2015-06-01 13:25:04 +09001364 err = prepare(rocker_port, desc_info, prepare_priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001365 if (err) {
Scott Feldman179f9a22015-06-12 21:35:46 -07001366 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
Jiri Pirkoca0a5f22016-02-16 15:14:47 +01001367 return err;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001368 }
Scott Feldmanc4f20322015-05-10 09:47:50 -07001369
Jiri Pirko76c6f942015-09-24 10:02:44 +02001370 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07001371 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1372
Scott Feldman179f9a22015-06-12 21:35:46 -07001373 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1374
1375 if (nowait)
1376 return 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001377
Jiri Pirko76c6f942015-09-24 10:02:44 +02001378 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07001379 if (!rocker_wait_event_timeout(wait, HZ / 10))
1380 return -EIO;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001381
1382 err = rocker_desc_err(desc_info);
1383 if (err)
1384 return err;
1385
1386 if (process)
Simon Horman534ba6a2015-06-01 13:25:04 +09001387 err = process(rocker_port, desc_info, process_priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001388
1389 rocker_desc_gen_clear(desc_info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001390 return err;
1391}
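
/* Illustrative sketch of a typical call: reading a port's MAC address
 * pairs a prepare callback that builds the command TLVs with a process
 * callback that parses the response:
 *
 *	err = rocker_cmd_exec(rocker_port, NULL, 0,
 *			      rocker_cmd_get_port_settings_prep, NULL,
 *			      rocker_cmd_get_port_settings_macaddr_proc,
 *			      macaddr);
 *
 * With ROCKER_OP_FLAG_NOWAIT set, rocker_cmd_exec() returns as soon as
 * the descriptor is posted and the IRQ handler recycles it on
 * completion.
 */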

static int
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_TP;
	ecmd->phy_address = 0xff;
	ecmd->port = PORT_TP;
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}
1450
1451static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001452rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001453 const struct rocker_desc_info *desc_info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001454 void *priv)
1455{
1456 unsigned char *macaddr = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001457 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1458 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1459 const struct rocker_tlv *attr;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001460
1461 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1462 if (!attrs[ROCKER_TLV_CMD_INFO])
1463 return -EIO;
1464
1465 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1466 attrs[ROCKER_TLV_CMD_INFO]);
1467 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1468 if (!attr)
1469 return -EIO;
1470
1471 if (rocker_tlv_len(attr) != ETH_ALEN)
1472 return -EINVAL;
1473
1474 ether_addr_copy(macaddr, rocker_tlv_data(attr));
1475 return 0;
1476}
1477
Jiri Pirkoe1ba3de2016-02-16 15:14:43 +01001478static int
1479rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
1480 const struct rocker_desc_info *desc_info,
1481 void *priv)
1482{
1483 u8 *p_mode = priv;
1484 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1485 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1486 const struct rocker_tlv *attr;
1487
1488 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1489 if (!attrs[ROCKER_TLV_CMD_INFO])
1490 return -EIO;
1491
1492 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1493 attrs[ROCKER_TLV_CMD_INFO]);
1494 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
1495 if (!attr)
1496 return -EIO;
1497
1498 *p_mode = rocker_tlv_get_u8(attr);
1499 return 0;
1500}
1501
David Aherndb191702015-03-17 20:23:16 -06001502struct port_name {
1503 char *buf;
1504 size_t len;
1505};
1506
1507static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001508rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001509 const struct rocker_desc_info *desc_info,
David Aherndb191702015-03-17 20:23:16 -06001510 void *priv)
1511{
Simon Hormane5054642015-05-25 14:28:36 +09001512 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1513 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
David Aherndb191702015-03-17 20:23:16 -06001514 struct port_name *name = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001515 const struct rocker_tlv *attr;
David Aherndb191702015-03-17 20:23:16 -06001516 size_t i, j, len;
Simon Hormane5054642015-05-25 14:28:36 +09001517 const char *str;
David Aherndb191702015-03-17 20:23:16 -06001518
1519 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1520 if (!attrs[ROCKER_TLV_CMD_INFO])
1521 return -EIO;
1522
1523 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1524 attrs[ROCKER_TLV_CMD_INFO]);
1525 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1526 if (!attr)
1527 return -EIO;
1528
1529 len = min_t(size_t, rocker_tlv_len(attr), name->len - 1); /* room for '\0' */
1530 str = rocker_tlv_data(attr);
1531
1532 /* make sure name only contains alphanumeric characters */
1533 for (i = j = 0; i < len; ++i) {
1534 if (isalnum(str[i])) {
1535 name->buf[j] = str[i];
1536 j++;
1537 }
1538 }
1539
1540 if (j == 0)
1541 return -EIO;
1542
1543 name->buf[j] = '\0';
1544
1545 return 0;
1546}
1547
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001548static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001549rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001550 struct rocker_desc_info *desc_info,
1551 void *priv)
1552{
1553 struct ethtool_cmd *ecmd = priv;
1554 struct rocker_tlv *cmd_info;
1555
1556 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1557 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1558 return -EMSGSIZE;
1559 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1560 if (!cmd_info)
1561 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001562 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1563 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001564 return -EMSGSIZE;
1565 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1566 ethtool_cmd_speed(ecmd)))
1567 return -EMSGSIZE;
1568 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1569 ecmd->duplex))
1570 return -EMSGSIZE;
1571 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1572 ecmd->autoneg))
1573 return -EMSGSIZE;
1574 rocker_tlv_nest_end(desc_info, cmd_info);
1575 return 0;
1576}
1577
1578static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001579rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001580 struct rocker_desc_info *desc_info,
1581 void *priv)
1582{
Simon Hormane5054642015-05-25 14:28:36 +09001583 const unsigned char *macaddr = priv;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001584 struct rocker_tlv *cmd_info;
1585
1586 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1587 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1588 return -EMSGSIZE;
1589 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1590 if (!cmd_info)
1591 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001592 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1593 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001594 return -EMSGSIZE;
1595 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1596 ETH_ALEN, macaddr))
1597 return -EMSGSIZE;
1598 rocker_tlv_nest_end(desc_info, cmd_info);
1599 return 0;
1600}
1601
Scott Feldman5111f802014-11-28 14:34:30 +01001602static int
Scott Feldman77a58c72015-07-08 16:06:47 -07001603rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1604 struct rocker_desc_info *desc_info,
1605 void *priv)
1606{
1607 int mtu = *(int *)priv;
1608 struct rocker_tlv *cmd_info;
1609
1610 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1611 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1612 return -EMSGSIZE;
1613 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1614 if (!cmd_info)
1615 return -EMSGSIZE;
1616 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1617 rocker_port->pport))
1618 return -EMSGSIZE;
1619 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1620 mtu))
1621 return -EMSGSIZE;
1622 rocker_tlv_nest_end(desc_info, cmd_info);
1623 return 0;
1624}
1625
1626static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001627rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
Scott Feldman5111f802014-11-28 14:34:30 +01001628 struct rocker_desc_info *desc_info,
1629 void *priv)
1630{
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001631 bool learning = *(bool *)priv;
Scott Feldman5111f802014-11-28 14:34:30 +01001632 struct rocker_tlv *cmd_info;
1633
1634 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1635 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1636 return -EMSGSIZE;
1637 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1638 if (!cmd_info)
1639 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001640 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1641 rocker_port->pport))
Scott Feldman5111f802014-11-28 14:34:30 +01001642 return -EMSGSIZE;
1643 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001644 learning))
Scott Feldman5111f802014-11-28 14:34:30 +01001645 return -EMSGSIZE;
1646 rocker_tlv_nest_end(desc_info, cmd_info);
1647 return 0;
1648}
1649
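/* Thin wrappers around rocker_cmd_exec().  Each pairs a "prep" callback,
 * which builds the request TLVs, with an optional "proc" callback, which
 * parses the completion; the set operations pass a NULL proc since they
 * only care about the completion status.
 */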
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001650static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1651 struct ethtool_cmd *ecmd)
1652{
Jiri Pirko76c6f942015-09-24 10:02:44 +02001653 return rocker_cmd_exec(rocker_port, NULL, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001654 rocker_cmd_get_port_settings_prep, NULL,
1655 rocker_cmd_get_port_settings_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001656 ecmd);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001657}
1658
1659static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1660 unsigned char *macaddr)
1661{
Jiri Pirko76c6f942015-09-24 10:02:44 +02001662 return rocker_cmd_exec(rocker_port, NULL, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001663 rocker_cmd_get_port_settings_prep, NULL,
1664 rocker_cmd_get_port_settings_macaddr_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001665 macaddr);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001666}
1667
Jiri Pirkoe1ba3de2016-02-16 15:14:43 +01001668static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
1669 u8 *p_mode)
1670{
1671 return rocker_cmd_exec(rocker_port, NULL, 0,
1672 rocker_cmd_get_port_settings_prep, NULL,
1673 rocker_cmd_get_port_settings_mode_proc, p_mode);
1674}
1675
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001676static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1677 struct ethtool_cmd *ecmd)
1678{
Jiri Pirko76c6f942015-09-24 10:02:44 +02001679 return rocker_cmd_exec(rocker_port, NULL, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001680 rocker_cmd_set_port_settings_ethtool_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001681 ecmd, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001682}
1683
1684static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1685 unsigned char *macaddr)
1686{
Jiri Pirko76c6f942015-09-24 10:02:44 +02001687 return rocker_cmd_exec(rocker_port, NULL, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001688 rocker_cmd_set_port_settings_macaddr_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001689 macaddr, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001690}
1691
Scott Feldman77a58c72015-07-08 16:06:47 -07001692static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1693 int mtu)
1694{
Jiri Pirko76c6f942015-09-24 10:02:44 +02001695 return rocker_cmd_exec(rocker_port, NULL, 0,
Scott Feldman77a58c72015-07-08 16:06:47 -07001696 rocker_cmd_set_port_settings_mtu_prep,
1697 &mtu, NULL, NULL);
1698}
1699
Scott Feldmanc4f20322015-05-10 09:47:50 -07001700static int rocker_port_set_learning(struct rocker_port *rocker_port,
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001701 struct switchdev_trans *trans,
1702 bool learning)
Scott Feldman5111f802014-11-28 14:34:30 +01001703{
Jiri Pirko76c6f942015-09-24 10:02:44 +02001704 return rocker_cmd_exec(rocker_port, trans, 0,
Scott Feldman5111f802014-11-28 14:34:30 +01001705 rocker_cmd_set_port_learning_prep,
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001706 &learning, NULL, NULL);
Scott Feldman5111f802014-11-28 14:34:30 +01001707}
1708
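/* A minimal usage sketch (hypothetical caller): enabling hardware
 * learning on a port outside of a switchdev transaction would look like
 *
 *	err = rocker_port_set_learning(rocker_port, NULL, true);
 *
 * with a non-NULL trans supplied when called from a switchdev
 * prepare/commit pair.
 */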
Jiri Pirkoe4201142016-02-16 15:14:45 +01001709/**********************
1710 * Worlds manipulation
1711 **********************/
1712
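/* A "world" is a port hardware mode (currently only OF-DPA) reported by
 * the device per port.  The rocker_world_*() helpers below dispatch
 * through rocker->wops and treat a missing callback as a no-op success,
 * so a world only has to implement the hooks it needs.
 */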
1713static struct rocker_world_ops *rocker_world_ops[] = {
1714 &rocker_ofdpa_ops,
1715};
1716
1717#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)
1718
1719static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
1720{
1721 int i;
1722
1723 for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
1724 if (rocker_world_ops[i]->mode == mode)
1725 return rocker_world_ops[i];
1726 return NULL;
1727}
1728
1729static int rocker_world_init(struct rocker *rocker, u8 mode)
1730{
1731 struct rocker_world_ops *wops;
1732 int err;
1733
1734 wops = rocker_world_ops_find(mode);
1735 if (!wops) {
1736 dev_err(&rocker->pdev->dev, "port mode %d is not supported\n",
1737 mode);
1738 return -EINVAL;
1739 }
1740 rocker->wops = wops;
1741 rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
1742 if (!rocker->wpriv)
1743 return -ENOMEM;
1744 if (!wops->init)
1745 return 0;
1746 err = wops->init(rocker);
1747 if (err)
1748 kfree(rocker->wpriv);
1749 return err;
1750}
1751
1752static void rocker_world_fini(struct rocker *rocker)
1753{
1754 struct rocker_world_ops *wops = rocker->wops;
1755
1756 if (!wops || !wops->fini)
1757 return;
1758 wops->fini(rocker);
1759 kfree(rocker->wpriv);
1760}
1761
1762static int rocker_world_check_init(struct rocker_port *rocker_port)
1763{
1764 struct rocker *rocker = rocker_port->rocker;
1765 u8 mode;
1766 int err;
1767
1768 err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
1769 if (err) {
1770 dev_err(&rocker->pdev->dev, "failed to get port mode\n");
1771 return err;
1772 }
1773 if (rocker->wops) {
1774 if (rocker->wops->mode != mode) {
1775 dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
1776 return -EINVAL;
1777 }
1778 return 0;
1779 }
1780 return rocker_world_init(rocker, mode);
1781}
1782
1783static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
1784{
1785 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1786 int err;
1787
1788 rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
1789 if (!rocker_port->wpriv)
1790 return -ENOMEM;
1791 if (!wops->port_pre_init)
1792 return 0;
1793 err = wops->port_pre_init(rocker_port);
1794 if (err)
1795 kfree(rocker_port->wpriv);
1796 return err;
1797}
1798
1799static int rocker_world_port_init(struct rocker_port *rocker_port)
1800{
1801 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1802
1803 if (!wops->port_init)
1804 return 0;
1805 return wops->port_init(rocker_port);
1806}
1807
1808static void rocker_world_port_fini(struct rocker_port *rocker_port)
1809{
1810 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1811
1812 if (!wops->port_fini)
1813 return;
1814 wops->port_fini(rocker_port);
1815}
1816
1817static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
1818{
1819 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1820
1821 if (!wops->port_post_fini)
1822 return;
1823 wops->port_post_fini(rocker_port);
1824 kfree(rocker_port->wpriv);
1825}
1826
1827static int rocker_world_port_open(struct rocker_port *rocker_port)
1828{
1829 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1830
1831 if (!wops->port_open)
1832 return 0;
1833 return wops->port_open(rocker_port);
1834}
1835
1836static void rocker_world_port_stop(struct rocker_port *rocker_port)
1837{
1838 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1839
1840 if (!wops->port_stop)
1841 return;
1842 wops->port_stop(rocker_port);
1843}
1844
1845static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
1846 u8 state,
1847 struct switchdev_trans *trans)
1848{
1849 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1850
1851 if (!wops->port_attr_stp_state_set)
1852 return 0;
1853 return wops->port_attr_stp_state_set(rocker_port, state, trans);
1854}
1855
1856static int
1857rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
1858 unsigned long brport_flags,
1859 struct switchdev_trans *trans)
1860{
1861 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1862
1863 if (!wops->port_attr_bridge_flags_set)
1864 return 0;
1865 return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
1866 trans);
1867}
1868
1869static int
1870rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
1871 unsigned long *p_brport_flags)
1872{
1873 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1874
1875 if (!wops->port_attr_bridge_flags_get)
1876 return 0;
1877 return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);
1878}
1879
1880static int
1881rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
1882 u32 ageing_time,
1883 struct switchdev_trans *trans)
1885{
1886 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1887
1888 if (!wops->port_attr_bridge_ageing_time_set)
1889 return 0;
1890 return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
1891 trans);
1892}
1893
1894static int
1895rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
1896 const struct switchdev_obj_port_vlan *vlan,
1897 struct switchdev_trans *trans)
1898{
1899 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1900
1901 if (!wops->port_obj_vlan_add)
1902 return 0;
1903 return wops->port_obj_vlan_add(rocker_port, vlan, trans);
1904}
1905
1906static int
1907rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
1908 const struct switchdev_obj_port_vlan *vlan)
1909{
1910 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1911
1912 if (!wops->port_obj_vlan_del)
1913 return 0;
1914 return wops->port_obj_vlan_del(rocker_port, vlan);
1915}
1916
1917static int
1918rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
1919 struct switchdev_obj_port_vlan *vlan,
1920 switchdev_obj_dump_cb_t *cb)
1921{
1922 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1923
1924 if (!wops->port_obj_vlan_dump)
1925 return 0;
1926 return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
1927}
1928
1929static int
1930rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
1931 const struct switchdev_obj_ipv4_fib *fib4,
1932 struct switchdev_trans *trans)
1933{
1934 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1935
1936 if (!wops->port_obj_fib4_add)
1937 return 0;
1938 return wops->port_obj_fib4_add(rocker_port, fib4, trans);
1939}
1940
1941static int
1942rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
1943 const struct switchdev_obj_ipv4_fib *fib4)
1944{
1945 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1946
1947 if (!wops->port_obj_fib4_del)
1948 return 0;
1949 return wops->port_obj_fib4_del(rocker_port, fib4);
1950}
1951
1952static int
1953rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
1954 const struct switchdev_obj_port_fdb *fdb,
1955 struct switchdev_trans *trans)
1956{
1957 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1958
1959 if (!wops->port_obj_fdb_add)
1960 return 0;
1961 return wops->port_obj_fdb_add(rocker_port, fdb, trans);
1962}
1963
1964static int
1965rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
1966 const struct switchdev_obj_port_fdb *fdb)
1967{
1968 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1969
1970 if (!wops->port_obj_fdb_del)
1971 return 0;
1972 return wops->port_obj_fdb_del(rocker_port, fdb);
1973}
1974
1975static int
1976rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
1977 struct switchdev_obj_port_fdb *fdb,
1978 switchdev_obj_dump_cb_t *cb)
1979{
1980 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1981
1982 if (!wops->port_obj_fdb_dump)
1983 return 0;
1984 return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
1985}
1986
1987static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
1988 struct net_device *master)
1989{
1990 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1991
1992 if (!wops->port_master_linked)
1993 return 0;
1994 return wops->port_master_linked(rocker_port, master);
1995}
1996
1997static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
1998 struct net_device *master)
1999{
2000 struct rocker_world_ops *wops = rocker_port->rocker->wops;
2001
2002 if (!wops->port_master_unlinked)
2003 return 0;
2004 return wops->port_master_unlinked(rocker_port, master);
2005}
2006
2007static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
2008 struct neighbour *n)
2009{
2010 struct rocker_world_ops *wops = rocker_port->rocker->wops;
2011
2012 if (!wops->port_neigh_update)
2013 return 0;
2014 return wops->port_neigh_update(rocker_port, n);
2015}
2016
2017static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
2018 struct neighbour *n)
2019{
2020 struct rocker_world_ops *wops = rocker_port->rocker->wops;
2021
2022 if (!wops->port_neigh_destroy)
2023 return 0;
2024 return wops->port_neigh_destroy(rocker_port, n);
2025}
2026
2027static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2028 const unsigned char *addr,
2029 __be16 vlan_id)
2030{
2031 struct rocker_world_ops *wops = rocker_port->rocker->wops;
2032
2033 if (!wops->port_ev_mac_vlan_seen)
2034 return 0;
2035 return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
2036}
2037
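/* The rocker_cmd_flow_tbl_add_*() helpers below emit the table-specific
 * match/action TLVs for a single OF-DPA flow table entry.  They are
 * invoked from rocker_cmd_flow_tbl_add() after the common header TLVs
 * (table id, priority, hard time, cookie) have been written.
 */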
Simon Hormane5054642015-05-25 14:28:36 +09002038static int
2039rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
2040 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002041{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002042 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2043 entry->key.ig_port.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002044 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002045 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2046 entry->key.ig_port.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002047 return -EMSGSIZE;
2048 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2049 entry->key.ig_port.goto_tbl))
2050 return -EMSGSIZE;
2051
2052 return 0;
2053}
2054
Simon Hormane5054642015-05-25 14:28:36 +09002055static int
2056rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
2057 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002058{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002059 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2060 entry->key.vlan.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002061 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002062 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2063 entry->key.vlan.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002064 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002065 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2066 entry->key.vlan.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002067 return -EMSGSIZE;
2068 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2069 entry->key.vlan.goto_tbl))
2070 return -EMSGSIZE;
2071 if (entry->key.vlan.untagged &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002072 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
2073 entry->key.vlan.new_vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002074 return -EMSGSIZE;
2075
2076 return 0;
2077}
2078
Simon Hormane5054642015-05-25 14:28:36 +09002079static int
2080rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
2081 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002082{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002083 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2084 entry->key.term_mac.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002085 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002086 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2087 entry->key.term_mac.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002088 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002089 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2090 entry->key.term_mac.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002091 return -EMSGSIZE;
2092 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2093 ETH_ALEN, entry->key.term_mac.eth_dst))
2094 return -EMSGSIZE;
2095 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2096 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
2097 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002098 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2099 entry->key.term_mac.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002100 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002101 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2102 entry->key.term_mac.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002103 return -EMSGSIZE;
2104 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2105 entry->key.term_mac.goto_tbl))
2106 return -EMSGSIZE;
2107 if (entry->key.term_mac.copy_to_cpu &&
2108 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2109 entry->key.term_mac.copy_to_cpu))
2110 return -EMSGSIZE;
2111
2112 return 0;
2113}
2114
2115static int
2116rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002117 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002118{
Jiri Pirko9b03c712014-12-03 14:14:53 +01002119 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2120 entry->key.ucast_routing.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002121 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002122 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2123 entry->key.ucast_routing.dst4))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002124 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002125 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2126 entry->key.ucast_routing.dst4_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002127 return -EMSGSIZE;
2128 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2129 entry->key.ucast_routing.goto_tbl))
2130 return -EMSGSIZE;
2131 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2132 entry->key.ucast_routing.group_id))
2133 return -EMSGSIZE;
2134
2135 return 0;
2136}
2137
Simon Hormane5054642015-05-25 14:28:36 +09002138static int
2139rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2140 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002141{
2142 if (entry->key.bridge.has_eth_dst &&
2143 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2144 ETH_ALEN, entry->key.bridge.eth_dst))
2145 return -EMSGSIZE;
2146 if (entry->key.bridge.has_eth_dst_mask &&
2147 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2148 ETH_ALEN, entry->key.bridge.eth_dst_mask))
2149 return -EMSGSIZE;
2150 if (entry->key.bridge.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002151 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2152 entry->key.bridge.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002153 return -EMSGSIZE;
2154 if (entry->key.bridge.tunnel_id &&
2155 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2156 entry->key.bridge.tunnel_id))
2157 return -EMSGSIZE;
2158 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2159 entry->key.bridge.goto_tbl))
2160 return -EMSGSIZE;
2161 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2162 entry->key.bridge.group_id))
2163 return -EMSGSIZE;
2164 if (entry->key.bridge.copy_to_cpu &&
2165 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2166 entry->key.bridge.copy_to_cpu))
2167 return -EMSGSIZE;
2168
2169 return 0;
2170}
2171
Simon Hormane5054642015-05-25 14:28:36 +09002172static int
2173rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2174 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002175{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002176 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2177 entry->key.acl.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002178 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002179 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2180 entry->key.acl.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002181 return -EMSGSIZE;
2182 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2183 ETH_ALEN, entry->key.acl.eth_src))
2184 return -EMSGSIZE;
2185 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2186 ETH_ALEN, entry->key.acl.eth_src_mask))
2187 return -EMSGSIZE;
2188 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2189 ETH_ALEN, entry->key.acl.eth_dst))
2190 return -EMSGSIZE;
2191 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2192 ETH_ALEN, entry->key.acl.eth_dst_mask))
2193 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002194 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2195 entry->key.acl.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002196 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002197 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2198 entry->key.acl.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002199 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002200 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2201 entry->key.acl.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002202 return -EMSGSIZE;
2203
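	/* For IPv4/IPv6 the ToS byte is split per OF-DPA: the low six bits
	 * are sent as DSCP and the top two bits as ECN, each with its own
	 * mask TLV.
	 */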
2204 switch (ntohs(entry->key.acl.eth_type)) {
2205 case ETH_P_IP:
2206 case ETH_P_IPV6:
2207 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2208 entry->key.acl.ip_proto))
2209 return -EMSGSIZE;
2210 if (rocker_tlv_put_u8(desc_info,
2211 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2212 entry->key.acl.ip_proto_mask))
2213 return -EMSGSIZE;
2214 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2215 entry->key.acl.ip_tos & 0x3f))
2216 return -EMSGSIZE;
2217 if (rocker_tlv_put_u8(desc_info,
2218 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2219 entry->key.acl.ip_tos_mask & 0x3f))
2220 return -EMSGSIZE;
2221 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2222 (entry->key.acl.ip_tos & 0xc0) >> 6))
2223 return -EMSGSIZE;
2224 if (rocker_tlv_put_u8(desc_info,
2225 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2226 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
2227 return -EMSGSIZE;
2228 break;
2229 }
2230
2231 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2232 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2233 entry->key.acl.group_id))
2234 return -EMSGSIZE;
2235
2236 return 0;
2237}
2238
Simon Horman534ba6a2015-06-01 13:25:04 +09002239static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002240 struct rocker_desc_info *desc_info,
2241 void *priv)
2242{
Simon Hormane5054642015-05-25 14:28:36 +09002243 const struct rocker_flow_tbl_entry *entry = priv;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002244 struct rocker_tlv *cmd_info;
2245 int err = 0;
2246
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002247 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002248 return -EMSGSIZE;
2249 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2250 if (!cmd_info)
2251 return -EMSGSIZE;
2252 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2253 entry->key.tbl_id))
2254 return -EMSGSIZE;
2255 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2256 entry->key.priority))
2257 return -EMSGSIZE;
2258 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2259 return -EMSGSIZE;
2260 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2261 entry->cookie))
2262 return -EMSGSIZE;
2263
2264 switch (entry->key.tbl_id) {
2265 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2266 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2267 break;
2268 case ROCKER_OF_DPA_TABLE_ID_VLAN:
2269 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2270 break;
2271 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2272 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2273 break;
2274 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2275 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2276 break;
2277 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2278 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2279 break;
2280 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2281 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2282 break;
2283 default:
2284 err = -ENOTSUPP;
2285 break;
2286 }
2287
2288 if (err)
2289 return err;
2290
2291 rocker_tlv_nest_end(desc_info, cmd_info);
2292
2293 return 0;
2294}
2295
Simon Horman534ba6a2015-06-01 13:25:04 +09002296static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002297 struct rocker_desc_info *desc_info,
2298 void *priv)
2299{
2300 const struct rocker_flow_tbl_entry *entry = priv;
2301 struct rocker_tlv *cmd_info;
2302
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002303 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002304 return -EMSGSIZE;
2305 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2306 if (!cmd_info)
2307 return -EMSGSIZE;
2308 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2309 entry->cookie))
2310 return -EMSGSIZE;
2311 rocker_tlv_nest_end(desc_info, cmd_info);
2312
2313 return 0;
2314}
2315
2316static int
2317rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2318 struct rocker_group_tbl_entry *entry)
2319{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002320 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002321 ROCKER_GROUP_PORT_GET(entry->group_id)))
2322 return -EMSGSIZE;
2323 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2324 entry->l2_interface.pop_vlan))
2325 return -EMSGSIZE;
2326
2327 return 0;
2328}
2329
2330static int
2331rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002332 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002333{
2334 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2335 entry->l2_rewrite.group_id))
2336 return -EMSGSIZE;
2337 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2338 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2339 ETH_ALEN, entry->l2_rewrite.eth_src))
2340 return -EMSGSIZE;
2341 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2342 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2343 ETH_ALEN, entry->l2_rewrite.eth_dst))
2344 return -EMSGSIZE;
2345 if (entry->l2_rewrite.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002346 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2347 entry->l2_rewrite.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002348 return -EMSGSIZE;
2349
2350 return 0;
2351}
2352
2353static int
2354rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002355 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002356{
2357 int i;
2358 struct rocker_tlv *group_ids;
2359
2360 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2361 entry->group_count))
2362 return -EMSGSIZE;
2363
2364 group_ids = rocker_tlv_nest_start(desc_info,
2365 ROCKER_TLV_OF_DPA_GROUP_IDS);
2366 if (!group_ids)
2367 return -EMSGSIZE;
2368
2369 for (i = 0; i < entry->group_count; i++)
2370 /* Note TLV array is 1-based */
2371 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2372 return -EMSGSIZE;
2373
2374 rocker_tlv_nest_end(desc_info, group_ids);
2375
2376 return 0;
2377}
2378
2379static int
2380rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002381 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002382{
2383 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2384 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2385 ETH_ALEN, entry->l3_unicast.eth_src))
2386 return -EMSGSIZE;
2387 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2388 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2389 ETH_ALEN, entry->l3_unicast.eth_dst))
2390 return -EMSGSIZE;
2391 if (entry->l3_unicast.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002392 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2393 entry->l3_unicast.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002394 return -EMSGSIZE;
2395 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2396 entry->l3_unicast.ttl_check))
2397 return -EMSGSIZE;
2398 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2399 entry->l3_unicast.group_id))
2400 return -EMSGSIZE;
2401
2402 return 0;
2403}
2404
Simon Horman534ba6a2015-06-01 13:25:04 +09002405static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002406 struct rocker_desc_info *desc_info,
2407 void *priv)
2408{
2409 struct rocker_group_tbl_entry *entry = priv;
2410 struct rocker_tlv *cmd_info;
2411 int err = 0;
2412
2413 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2414 return -EMSGSIZE;
2415 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2416 if (!cmd_info)
2417 return -EMSGSIZE;
2418
2419 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2420 entry->group_id))
2421 return -EMSGSIZE;
2422
2423 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2424 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2425 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2426 break;
2427 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2428 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2429 break;
2430 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2431 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2432 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2433 break;
2434 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2435 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2436 break;
2437 default:
2438 err = -ENOTSUPP;
2439 break;
2440 }
2441
2442 if (err)
2443 return err;
2444
2445 rocker_tlv_nest_end(desc_info, cmd_info);
2446
2447 return 0;
2448}
2449
Simon Horman534ba6a2015-06-01 13:25:04 +09002450static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002451 struct rocker_desc_info *desc_info,
2452 void *priv)
2453{
2454 const struct rocker_group_tbl_entry *entry = priv;
2455 struct rocker_tlv *cmd_info;
2456
2457 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2458 return -EMSGSIZE;
2459 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2460 if (!cmd_info)
2461 return -EMSGSIZE;
2462 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2463 entry->group_id))
2464 return -EMSGSIZE;
2465 rocker_tlv_nest_end(desc_info, cmd_info);
2466
2467 return 0;
2468}
2469
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002470/***************************************************
2471 * Flow, group, FDB, internal VLAN and neigh tables
2472 ***************************************************/
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002473
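/* All five tables are kernel hashtables, each guarded by its own
 * spinlock; lookups and updates take the lock with the _irqsave
 * variants so they are safe from any context the driver runs in.
 */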
2474static int rocker_init_tbls(struct rocker *rocker)
2475{
2476 hash_init(rocker->flow_tbl);
2477 spin_lock_init(&rocker->flow_tbl_lock);
2478
2479 hash_init(rocker->group_tbl);
2480 spin_lock_init(&rocker->group_tbl_lock);
2481
2482 hash_init(rocker->fdb_tbl);
2483 spin_lock_init(&rocker->fdb_tbl_lock);
2484
2485 hash_init(rocker->internal_vlan_tbl);
2486 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2487
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002488 hash_init(rocker->neigh_tbl);
2489 spin_lock_init(&rocker->neigh_tbl_lock);
2490
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002491 return 0;
2492}
2493
2494static void rocker_free_tbls(struct rocker *rocker)
2495{
2496 unsigned long flags;
2497 struct rocker_flow_tbl_entry *flow_entry;
2498 struct rocker_group_tbl_entry *group_entry;
2499 struct rocker_fdb_tbl_entry *fdb_entry;
2500 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002501 struct rocker_neigh_tbl_entry *neigh_entry;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002502 struct hlist_node *tmp;
2503 int bkt;
2504
2505 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2506 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2507 hash_del(&flow_entry->entry);
2508 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2509
2510 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2511 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2512 hash_del(&group_entry->entry);
2513 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2514
2515 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2516 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2517 hash_del(&fdb_entry->entry);
2518 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2519
2520 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2521 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2522 tmp, internal_vlan_entry, entry)
2523 hash_del(&internal_vlan_entry->entry);
2524 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002525
2526 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2527 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2528 hash_del(&neigh_entry->entry);
2529 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002530}
2531
2532static struct rocker_flow_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002533rocker_flow_tbl_find(const struct rocker *rocker,
2534 const struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002535{
2536 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002537 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002538
2539 hash_for_each_possible(rocker->flow_tbl, found,
2540 entry, match->key_crc32) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002541 if (memcmp(&found->key, &match->key, key_len) == 0)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002542 return found;
2543 }
2544
2545 return NULL;
2546}
2547
2548static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002549 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002550 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002551{
2552 struct rocker *rocker = rocker_port->rocker;
2553 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002554 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002555 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002556
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002557 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002558
Scott Feldman179f9a22015-06-12 21:35:46 -07002559 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002560
2561 found = rocker_flow_tbl_find(rocker, match);
2562
2563 if (found) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002564 match->cookie = found->cookie;
Jiri Pirko76c6f942015-09-24 10:02:44 +02002565 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002566 hash_del(&found->entry);
Jiri Pirkob15edf82016-02-16 15:14:39 +01002567 rocker_kfree(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002568 found = match;
2569 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002570 } else {
2571 found = match;
2572 found->cookie = rocker->flow_tbl_next_cookie++;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002573 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002574 }
2575
Jiri Pirko76c6f942015-09-24 10:02:44 +02002576 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002577 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002578
Scott Feldman179f9a22015-06-12 21:35:46 -07002579 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002580
Jiri Pirko76c6f942015-09-24 10:02:44 +02002581 return rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07002582 rocker_cmd_flow_tbl_add, found, NULL, NULL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002583}
2584
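/* Adding a flow that matches an existing entry replaces it: the old
 * entry's cookie is reused and the command sent to the device becomes
 * OF_DPA_FLOW_MOD instead of OF_DPA_FLOW_ADD.  During a switchdev
 * prepare phase the hash table itself is left untouched.
 */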
2585static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002586 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002587 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002588{
2589 struct rocker *rocker = rocker_port->rocker;
2590 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002591 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002592 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002593 int err = 0;
2594
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002595 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002596
Scott Feldman179f9a22015-06-12 21:35:46 -07002597 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002598
2599 found = rocker_flow_tbl_find(rocker, match);
2600
2601 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002602 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002603 hash_del(&found->entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002604 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002605 }
2606
Scott Feldman179f9a22015-06-12 21:35:46 -07002607 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002608
Jiri Pirkob15edf82016-02-16 15:14:39 +01002609 rocker_kfree(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002610
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002611 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002612 err = rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002613 rocker_cmd_flow_tbl_del,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002614 found, NULL, NULL);
Jiri Pirkob15edf82016-02-16 15:14:39 +01002615 rocker_kfree(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002616 }
2617
2618 return err;
2619}
2620
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002621static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002622 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002623 struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002624{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002625 if (flags & ROCKER_OP_FLAG_REMOVE)
Jiri Pirko76c6f942015-09-24 10:02:44 +02002626 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002627 else
Jiri Pirko76c6f942015-09-24 10:02:44 +02002628 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002629}
2630
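/* Callers allocate an entry and hand ownership to rocker_flow_tbl_do();
 * ROCKER_OP_FLAG_REMOVE in @flags selects delete, otherwise the entry is
 * added (or modified in place if it already exists).
 */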
2631static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002632 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002633 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002634 enum rocker_of_dpa_table_id goto_tbl)
2635{
2636 struct rocker_flow_tbl_entry *entry;
2637
Jiri Pirkob15edf82016-02-16 15:14:39 +01002638 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002639 if (!entry)
2640 return -ENOMEM;
2641
2642 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2643 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002644 entry->key.ig_port.in_pport = in_pport;
2645 entry->key.ig_port.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002646 entry->key.ig_port.goto_tbl = goto_tbl;
2647
Jiri Pirko76c6f942015-09-24 10:02:44 +02002648 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002649}
2650
2651static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002652 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002653 u32 in_pport, __be16 vlan_id,
2654 __be16 vlan_id_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002655 enum rocker_of_dpa_table_id goto_tbl,
2656 bool untagged, __be16 new_vlan_id)
2657{
2658 struct rocker_flow_tbl_entry *entry;
2659
Jiri Pirkob15edf82016-02-16 15:14:39 +01002660 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002661 if (!entry)
2662 return -ENOMEM;
2663
2664 entry->key.priority = ROCKER_PRIORITY_VLAN;
2665 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002666 entry->key.vlan.in_pport = in_pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002667 entry->key.vlan.vlan_id = vlan_id;
2668 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2669 entry->key.vlan.goto_tbl = goto_tbl;
2670
2671 entry->key.vlan.untagged = untagged;
2672 entry->key.vlan.new_vlan_id = new_vlan_id;
2673
Jiri Pirko76c6f942015-09-24 10:02:44 +02002674 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002675}
2676
2677static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002678 struct switchdev_trans *trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002679 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002680 __be16 eth_type, const u8 *eth_dst,
2681 const u8 *eth_dst_mask, __be16 vlan_id,
2682 __be16 vlan_id_mask, bool copy_to_cpu,
2683 int flags)
2684{
2685 struct rocker_flow_tbl_entry *entry;
2686
Jiri Pirkob15edf82016-02-16 15:14:39 +01002687 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002688 if (!entry)
2689 return -ENOMEM;
2690
2691 if (is_multicast_ether_addr(eth_dst)) {
2692 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2693 entry->key.term_mac.goto_tbl =
2694 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2695 } else {
2696 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2697 entry->key.term_mac.goto_tbl =
2698 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2699 }
2700
2701 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002702 entry->key.term_mac.in_pport = in_pport;
2703 entry->key.term_mac.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002704 entry->key.term_mac.eth_type = eth_type;
2705 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2706 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2707 entry->key.term_mac.vlan_id = vlan_id;
2708 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2709 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2710
Jiri Pirko76c6f942015-09-24 10:02:44 +02002711 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002712}
2713
2714static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002715 struct switchdev_trans *trans, int flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002716 const u8 *eth_dst, const u8 *eth_dst_mask,
2717 __be16 vlan_id, u32 tunnel_id,
2718 enum rocker_of_dpa_table_id goto_tbl,
2719 u32 group_id, bool copy_to_cpu)
2720{
2721 struct rocker_flow_tbl_entry *entry;
2722 u32 priority;
2723 bool vlan_bridging = !!vlan_id;
2724 bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2725 bool wild = false;
2726
Jiri Pirkob15edf82016-02-16 15:14:39 +01002727 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002728 if (!entry)
2729 return -ENOMEM;
2730
2731 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2732
2733 if (eth_dst) {
2734 entry->key.bridge.has_eth_dst = 1;
2735 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2736 }
2737 if (eth_dst_mask) {
2738 entry->key.bridge.has_eth_dst_mask = 1;
2739 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
Simon Horman629161f2015-04-30 15:21:29 +09002740 if (!ether_addr_equal(eth_dst_mask, ff_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002741 wild = true;
2742 }
2743
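	/* Choose priority from match specificity: VLAN vs tenant (tunnel)
	 * bridging, default (missing or wildcarded eth_dst) vs exact match.
	 */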
2744 priority = ROCKER_PRIORITY_UNKNOWN;
Thomas Graf51ace882014-11-28 14:34:32 +01002745 if (vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002746 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002747 else if (vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002748 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002749 else if (vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002750 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
Thomas Graf51ace882014-11-28 14:34:32 +01002751 else if (!vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002752 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002753 else if (!vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002754 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002755 else if (!vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002756 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2757
2758 entry->key.priority = priority;
2759 entry->key.bridge.vlan_id = vlan_id;
2760 entry->key.bridge.tunnel_id = tunnel_id;
2761 entry->key.bridge.goto_tbl = goto_tbl;
2762 entry->key.bridge.group_id = group_id;
2763 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2764
Jiri Pirko76c6f942015-09-24 10:02:44 +02002765 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002766}
2767
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002768static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002769 struct switchdev_trans *trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002770 __be16 eth_type, __be32 dst,
2771 __be32 dst_mask, u32 priority,
2772 enum rocker_of_dpa_table_id goto_tbl,
2773 u32 group_id, int flags)
2774{
2775 struct rocker_flow_tbl_entry *entry;
2776
Jiri Pirkob15edf82016-02-16 15:14:39 +01002777 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002778 if (!entry)
2779 return -ENOMEM;
2780
2781 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2782 entry->key.priority = priority;
2783 entry->key.ucast_routing.eth_type = eth_type;
2784 entry->key.ucast_routing.dst4 = dst;
2785 entry->key.ucast_routing.dst4_mask = dst_mask;
2786 entry->key.ucast_routing.goto_tbl = goto_tbl;
2787 entry->key.ucast_routing.group_id = group_id;
2788 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2789 ucast_routing.group_id);
2790
Jiri Pirko76c6f942015-09-24 10:02:44 +02002791 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002792}
2793
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002794static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002795 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002796 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002797 const u8 *eth_src, const u8 *eth_src_mask,
2798 const u8 *eth_dst, const u8 *eth_dst_mask,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002799 __be16 eth_type, __be16 vlan_id,
2800 __be16 vlan_id_mask, u8 ip_proto,
2801 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002802 u32 group_id)
2803{
2804 u32 priority;
2805 struct rocker_flow_tbl_entry *entry;
2806
Jiri Pirkob15edf82016-02-16 15:14:39 +01002807 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002808 if (!entry)
2809 return -ENOMEM;
2810
2811 priority = ROCKER_PRIORITY_ACL_NORMAL;
2812 if (eth_dst && eth_dst_mask) {
Simon Horman629161f2015-04-30 15:21:29 +09002813 if (ether_addr_equal(eth_dst_mask, mcast_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002814 priority = ROCKER_PRIORITY_ACL_DFLT;
2815 else if (is_link_local_ether_addr(eth_dst))
2816 priority = ROCKER_PRIORITY_ACL_CTRL;
2817 }
2818
2819 entry->key.priority = priority;
2820 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002821 entry->key.acl.in_pport = in_pport;
2822 entry->key.acl.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002823
2824 if (eth_src)
2825 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2826 if (eth_src_mask)
2827 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2828 if (eth_dst)
2829 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2830 if (eth_dst_mask)
2831 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2832
2833 entry->key.acl.eth_type = eth_type;
2834 entry->key.acl.vlan_id = vlan_id;
2835 entry->key.acl.vlan_id_mask = vlan_id_mask;
2836 entry->key.acl.ip_proto = ip_proto;
2837 entry->key.acl.ip_proto_mask = ip_proto_mask;
2838 entry->key.acl.ip_tos = ip_tos;
2839 entry->key.acl.ip_tos_mask = ip_tos_mask;
2840 entry->key.acl.group_id = group_id;
2841
Jiri Pirko76c6f942015-09-24 10:02:44 +02002842 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002843}
2844
2845static struct rocker_group_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002846rocker_group_tbl_find(const struct rocker *rocker,
2847 const struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002848{
2849 struct rocker_group_tbl_entry *found;
2850
2851 hash_for_each_possible(rocker->group_tbl, found,
2852 entry, match->group_id) {
2853 if (found->group_id == match->group_id)
2854 return found;
2855 }
2856
2857 return NULL;
2858}
2859
Jiri Pirko76c6f942015-09-24 10:02:44 +02002860static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002861 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002862{
2863 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2864 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2865 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
Jiri Pirkob15edf82016-02-16 15:14:39 +01002866 rocker_kfree(trans, entry->group_ids);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002867 break;
2868 default:
2869 break;
2870 }
Jiri Pirkob15edf82016-02-16 15:14:39 +01002871 rocker_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002872}
2873
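/* Add or replace a group table entry.  If an entry with the same
 * group_id already exists, it is replaced and the hardware is asked
 * to modify (GROUP_MOD) rather than add (GROUP_ADD).  The software
 * hash table is only touched outside of the transaction prepare
 * phase.
 */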
2874static int rocker_group_tbl_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002875 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002876 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002877{
2878 struct rocker *rocker = rocker_port->rocker;
2879 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002880 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002881
Scott Feldman179f9a22015-06-12 21:35:46 -07002882 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002883
2884 found = rocker_group_tbl_find(rocker, match);
2885
2886 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002887 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002888 hash_del(&found->entry);
Jiri Pirko76c6f942015-09-24 10:02:44 +02002889 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002890 found = match;
2891 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2892 } else {
2893 found = match;
2894 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2895 }
2896
Jiri Pirko76c6f942015-09-24 10:02:44 +02002897 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002898 hash_add(rocker->group_tbl, &found->entry, found->group_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002899
Scott Feldman179f9a22015-06-12 21:35:46 -07002900 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002901
Jiri Pirko76c6f942015-09-24 10:02:44 +02002902 return rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07002903 rocker_cmd_group_tbl_add, found, NULL, NULL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002904}
2905
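/* Delete a group table entry.  The match entry is always consumed;
 * if a live entry is found it is unhashed (commit phase only) and a
 * GROUP_DEL command is sent to the hardware.
 */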
2906static int rocker_group_tbl_del(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002907 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002908 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002909{
2910 struct rocker *rocker = rocker_port->rocker;
2911 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002912 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002913 int err = 0;
2914
Scott Feldman179f9a22015-06-12 21:35:46 -07002915 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002916
2917 found = rocker_group_tbl_find(rocker, match);
2918
2919 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002920 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002921 hash_del(&found->entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002922 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2923 }
2924
Scott Feldman179f9a22015-06-12 21:35:46 -07002925 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002926
Jiri Pirko76c6f942015-09-24 10:02:44 +02002927 rocker_group_tbl_entry_free(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002928
2929 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002930 err = rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002931 rocker_cmd_group_tbl_del,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002932 found, NULL, NULL);
Jiri Pirko76c6f942015-09-24 10:02:44 +02002933 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002934 }
2935
2936 return err;
2937}
2938
2939static int rocker_group_tbl_do(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002940 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002941 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002942{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002943 if (flags & ROCKER_OP_FLAG_REMOVE)
Jiri Pirko76c6f942015-09-24 10:02:44 +02002944 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002945 else
Jiri Pirko76c6f942015-09-24 10:02:44 +02002946 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002947}
2948
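/* OF-DPA group IDs pack the group type and its parameters into a
 * single u32 (rocker_hw.h has the authoritative masks): roughly, the
 * type sits in the top nibble, the VLAN in bits 16-27, and the pport
 * or index in the low bits.  So ROCKER_GROUP_L2_INTERFACE(vlan_id, 2)
 * names "pport 2 on this VLAN", and the same ID can later be
 * referenced from flood and L3 unicast groups.
 */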
2949static int rocker_group_l2_interface(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002950 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002951 __be16 vlan_id, u32 out_pport,
2952 int pop_vlan)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002953{
2954 struct rocker_group_tbl_entry *entry;
2955
Jiri Pirkob15edf82016-02-16 15:14:39 +01002956 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002957 if (!entry)
2958 return -ENOMEM;
2959
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002960 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002961 entry->l2_interface.pop_vlan = pop_vlan;
2962
Jiri Pirko76c6f942015-09-24 10:02:44 +02002963 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002964}
2965
2966static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002967 struct switchdev_trans *trans,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002968 int flags, u8 group_count,
Simon Hormane5054642015-05-25 14:28:36 +09002969 const u32 *group_ids, u32 group_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002970{
2971 struct rocker_group_tbl_entry *entry;
2972
Jiri Pirkob15edf82016-02-16 15:14:39 +01002973 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002974 if (!entry)
2975 return -ENOMEM;
2976
2977 entry->group_id = group_id;
2978 entry->group_count = group_count;
2979
Jiri Pirkob15edf82016-02-16 15:14:39 +01002980 entry->group_ids = rocker_kcalloc(trans, flags,
2981 group_count, sizeof(u32));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002982 if (!entry->group_ids) {
Jiri Pirkob15edf82016-02-16 15:14:39 +01002983 rocker_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002984 return -ENOMEM;
2985 }
2986 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2987
Jiri Pirko76c6f942015-09-24 10:02:44 +02002988 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002989}
2990
2991static int rocker_group_l2_flood(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002992 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002993 __be16 vlan_id, u8 group_count,
Simon Hormane5054642015-05-25 14:28:36 +09002994 const u32 *group_ids, u32 group_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002995{
Jiri Pirko76c6f942015-09-24 10:02:44 +02002996 return rocker_group_l2_fan_out(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002997 group_count, group_ids,
2998 group_id);
2999}
3000
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003001static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003002 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003003 u32 index, const u8 *src_mac, const u8 *dst_mac,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003004 __be16 vlan_id, bool ttl_check, u32 pport)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003005{
3006 struct rocker_group_tbl_entry *entry;
3007
Jiri Pirkob15edf82016-02-16 15:14:39 +01003008 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003009 if (!entry)
3010 return -ENOMEM;
3011
3012 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
3013 if (src_mac)
3014 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
3015 if (dst_mac)
3016 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
3017 entry->l3_unicast.vlan_id = vlan_id;
3018 entry->l3_unicast.ttl_check = ttl_check;
3019 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
3020
Jiri Pirko76c6f942015-09-24 10:02:44 +02003021 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003022}
3023
3024static struct rocker_neigh_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09003025rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003026{
3027 struct rocker_neigh_tbl_entry *found;
3028
Scott Feldman0f43deb2015-03-06 15:54:51 -08003029 hash_for_each_possible(rocker->neigh_tbl, found,
3030 entry, be32_to_cpu(ip_addr))
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003031 if (found->ip_addr == ip_addr)
3032 return found;
3033
3034 return NULL;
3035}
3036
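/* The neigh table helpers honor the switchdev transaction phase: the
 * index is reserved during prepare (or for non-transactional callers)
 * so that commit reuses the same entry, while ref counting and hash
 * insertion only happen outside of prepare.
 */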
3037static void _rocker_neigh_add(struct rocker *rocker,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003038 struct switchdev_trans *trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003039 struct rocker_neigh_tbl_entry *entry)
3040{
Jiri Pirko76c6f942015-09-24 10:02:44 +02003041 if (!switchdev_trans_ph_commit(trans))
Scott Feldman4d81db42015-06-12 21:24:40 -07003042 entry->index = rocker->neigh_tbl_next_index++;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003043 if (switchdev_trans_ph_prepare(trans))
Simon Horman550ecc92015-05-21 12:40:16 +09003044 return;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003045 entry->ref_count++;
Scott Feldman0f43deb2015-03-06 15:54:51 -08003046 hash_add(rocker->neigh_tbl, &entry->entry,
3047 be32_to_cpu(entry->ip_addr));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003048}
3049
Jiri Pirko76c6f942015-09-24 10:02:44 +02003050static void _rocker_neigh_del(struct switchdev_trans *trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003051 struct rocker_neigh_tbl_entry *entry)
3052{
Jiri Pirko76c6f942015-09-24 10:02:44 +02003053 if (switchdev_trans_ph_prepare(trans))
Simon Horman550ecc92015-05-21 12:40:16 +09003054 return;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003055 if (--entry->ref_count == 0) {
3056 hash_del(&entry->entry);
Jiri Pirkob15edf82016-02-16 15:14:39 +01003057 rocker_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003058 }
3059}
3060
Scott Feldmanc4f20322015-05-10 09:47:50 -07003061static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003062 struct switchdev_trans *trans,
Simon Hormane5054642015-05-25 14:28:36 +09003063 const u8 *eth_dst, bool ttl_check)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003064{
3065 if (eth_dst) {
3066 ether_addr_copy(entry->eth_dst, eth_dst);
3067 entry->ttl_check = ttl_check;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003068 } else if (!switchdev_trans_ph_prepare(trans)) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003069 entry->ref_count++;
3070 }
3071}
3072
3073static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003074 struct switchdev_trans *trans,
Simon Hormane5054642015-05-25 14:28:36 +09003075 int flags, __be32 ip_addr, const u8 *eth_dst)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003076{
3077 struct rocker *rocker = rocker_port->rocker;
3078 struct rocker_neigh_tbl_entry *entry;
3079 struct rocker_neigh_tbl_entry *found;
3080 unsigned long lock_flags;
3081 __be16 eth_type = htons(ETH_P_IP);
3082 enum rocker_of_dpa_table_id goto_tbl =
3083 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3084 u32 group_id;
3085 u32 priority = 0;
3086 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3087 bool updating;
3088 bool removing;
3089 int err = 0;
3090
Jiri Pirkob15edf82016-02-16 15:14:39 +01003091 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003092 if (!entry)
3093 return -ENOMEM;
3094
3095 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3096
3097 found = rocker_neigh_tbl_find(rocker, ip_addr);
3098
3099 updating = found && adding;
3100 removing = found && !adding;
3101 adding = !found && adding;
3102
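	/* Exactly one of adding/updating/removing is true here:
	 * entry absent + add     -> adding
	 * entry present + add    -> updating (refresh dst MAC)
	 * entry present + remove -> removing
	 * entry absent + remove  -> -ENOENT
	 */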
3103 if (adding) {
3104 entry->ip_addr = ip_addr;
3105 entry->dev = rocker_port->dev;
3106 ether_addr_copy(entry->eth_dst, eth_dst);
3107 entry->ttl_check = true;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003108 _rocker_neigh_add(rocker, trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003109 } else if (removing) {
3110 memcpy(entry, found, sizeof(*entry));
Jiri Pirko76c6f942015-09-24 10:02:44 +02003111 _rocker_neigh_del(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003112 } else if (updating) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003113 _rocker_neigh_update(found, trans, eth_dst, true);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003114 memcpy(entry, found, sizeof(*entry));
3115 } else {
3116 err = -ENOENT;
3117 }
3118
3119 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3120
3121 if (err)
3122 goto err_out;
3123
3124 /* For each active neighbor, we have an L3 unicast group and
3125 * a /32 route to the neighbor, which uses the L3 unicast
3126 * group. The L3 unicast group can also be referred to by
3127 * other routes' nexthops.
3128 */
3129
Jiri Pirko76c6f942015-09-24 10:02:44 +02003130 err = rocker_group_l3_unicast(rocker_port, trans, flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003131 entry->index,
3132 rocker_port->dev->dev_addr,
3133 entry->eth_dst,
3134 rocker_port->internal_vlan_id,
3135 entry->ttl_check,
3136 rocker_port->pport);
3137 if (err) {
3138 netdev_err(rocker_port->dev,
3139 "Error (%d) L3 unicast group index %d\n",
3140 err, entry->index);
3141 goto err_out;
3142 }
3143
3144 if (adding || removing) {
3145 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003146 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003147 eth_type, ip_addr,
3148 inet_make_mask(32),
3149 priority, goto_tbl,
3150 group_id, flags);
3151
3152 if (err)
3153 netdev_err(rocker_port->dev,
3154 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3155 err, &entry->ip_addr, group_id);
3156 }
3157
3158err_out:
3159 if (!adding)
Jiri Pirkob15edf82016-02-16 15:14:39 +01003160 rocker_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003161
3162 return err;
3163}
3164
3165static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003166 struct switchdev_trans *trans,
3167 __be32 ip_addr)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003168{
3169 struct net_device *dev = rocker_port->dev;
Scott Feldman0f43deb2015-03-06 15:54:51 -08003170 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003171 int err = 0;
3172
Ying Xue4133fc02015-05-15 12:53:21 +08003173 if (!n) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003174 n = neigh_create(&arp_tbl, &ip_addr, dev);
Ying Xue4133fc02015-05-15 12:53:21 +08003175 if (IS_ERR(n))
3176			return PTR_ERR(n);
3177 }
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003178
3179 /* If the neigh is already resolved, then go ahead and
3180 * install the entry, otherwise start the ARP process to
3181 * resolve the neigh.
3182 */
3183
3184 if (n->nud_state & NUD_VALID)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003185 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003186 ip_addr, n->ha);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003187 else
3188 neigh_event_send(n, NULL);
3189
Ying Xue4133fc02015-05-15 12:53:21 +08003190 neigh_release(n);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003191 return err;
3192}
3193
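/* Map a nexthop IP to the index of its L3 unicast group, creating a
 * ref-counted neigh entry if one doesn't exist yet.  If the neigh
 * isn't resolved to a MAC address, kick off ARP so that a later
 * neigh update installs the group.
 */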
Scott Feldmanc4f20322015-05-10 09:47:50 -07003194static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003195 struct switchdev_trans *trans, int flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003196 __be32 ip_addr, u32 *index)
3197{
3198 struct rocker *rocker = rocker_port->rocker;
3199 struct rocker_neigh_tbl_entry *entry;
3200 struct rocker_neigh_tbl_entry *found;
3201 unsigned long lock_flags;
3202 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3203 bool updating;
3204 bool removing;
3205 bool resolved = true;
3206 int err = 0;
3207
Jiri Pirkob15edf82016-02-16 15:14:39 +01003208 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003209 if (!entry)
3210 return -ENOMEM;
3211
3212 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3213
3214 found = rocker_neigh_tbl_find(rocker, ip_addr);
3215 if (found)
3216 *index = found->index;
3217
3218 updating = found && adding;
3219 removing = found && !adding;
3220 adding = !found && adding;
3221
3222 if (adding) {
3223 entry->ip_addr = ip_addr;
3224 entry->dev = rocker_port->dev;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003225 _rocker_neigh_add(rocker, trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003226 *index = entry->index;
3227 resolved = false;
3228 } else if (removing) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003229 _rocker_neigh_del(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003230 } else if (updating) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003231 _rocker_neigh_update(found, trans, NULL, false);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003232 resolved = !is_zero_ether_addr(found->eth_dst);
3233 } else {
3234 err = -ENOENT;
3235 }
3236
3237 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3238
3239 if (!adding)
Jiri Pirkob15edf82016-02-16 15:14:39 +01003240 rocker_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003241
3242 if (err)
3243 return err;
3244
3245	/* Resolved means the neigh's ip_addr has been resolved to its MAC. */
3246
3247 if (!resolved)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003248 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003249
3250 return err;
3251}
3252
Scott Feldman6c707942014-11-28 14:34:28 +01003253static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003254 struct switchdev_trans *trans,
Scott Feldman6c707942014-11-28 14:34:28 +01003255 int flags, __be16 vlan_id)
3256{
3257 struct rocker_port *p;
Simon Hormane5054642015-05-25 14:28:36 +09003258 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman6c707942014-11-28 14:34:28 +01003259 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
Scott Feldman04f49fa2015-03-15 23:04:46 -07003260 u32 *group_ids;
Scott Feldman6c707942014-11-28 14:34:28 +01003261 u8 group_count = 0;
Scott Feldman04f49fa2015-03-15 23:04:46 -07003262 int err = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01003263 int i;
3264
Jiri Pirkob15edf82016-02-16 15:14:39 +01003265 group_ids = rocker_kcalloc(trans, flags,
3266 rocker->port_count, sizeof(u32));
Scott Feldman04f49fa2015-03-15 23:04:46 -07003267 if (!group_ids)
3268 return -ENOMEM;
3269
Scott Feldman6c707942014-11-28 14:34:28 +01003270 /* Adjust the flood group for this VLAN. The flood group
3271 * references an L2 interface group for each port in this
3272 * VLAN.
3273 */
3274
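	/* e.g. with pports 1 and 3 bridged and carrying this VLAN,
	 * group_ids[] becomes { L2_INTERFACE(vlan, 1),
	 * L2_INTERFACE(vlan, 3) } and the VLAN's single L2 flood group
	 * is rewritten to fan out to both interfaces.
	 */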
3275 for (i = 0; i < rocker->port_count; i++) {
3276 p = rocker->ports[i];
Scott Feldmanbcfd7802015-06-01 11:39:04 -07003277 if (!p)
3278 continue;
Scott Feldman6c707942014-11-28 14:34:28 +01003279 if (!rocker_port_is_bridged(p))
3280 continue;
3281 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3282 group_ids[group_count++] =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003283 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003284 }
3285 }
3286
3287 /* If there are no bridged ports in this VLAN, we're done */
3288 if (group_count == 0)
Scott Feldman04f49fa2015-03-15 23:04:46 -07003289 goto no_ports_in_vlan;
Scott Feldman6c707942014-11-28 14:34:28 +01003290
Jiri Pirko76c6f942015-09-24 10:02:44 +02003291 err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003292 group_count, group_ids, group_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003293 if (err)
3294 netdev_err(rocker_port->dev,
3295 "Error (%d) port VLAN l2 flood group\n", err);
3296
Scott Feldman04f49fa2015-03-15 23:04:46 -07003297no_ports_in_vlan:
Jiri Pirkob15edf82016-02-16 15:14:39 +01003298 rocker_kfree(trans, group_ids);
Scott Feldman6c707942014-11-28 14:34:28 +01003299 return err;
3300}
3301
3302static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003303 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003304 __be16 vlan_id, bool pop_vlan)
Scott Feldman6c707942014-11-28 14:34:28 +01003305{
Simon Hormane5054642015-05-25 14:28:36 +09003306 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman6c707942014-11-28 14:34:28 +01003307 struct rocker_port *p;
3308 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003309 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003310 int ref = 0;
3311 int err;
3312 int i;
3313
3314 /* An L2 interface group for this port in this VLAN, but
3315 * only when port STP state is LEARNING|FORWARDING.
3316 */
3317
3318 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3319 rocker_port->stp_state == BR_STATE_FORWARDING) {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003320 out_pport = rocker_port->pport;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003321 err = rocker_group_l2_interface(rocker_port, trans, flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003322 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003323 if (err) {
3324 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003325 "Error (%d) port VLAN l2 group for pport %d\n",
3326 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003327 return err;
3328 }
3329 }
3330
3331 /* An L2 interface group for this VLAN to CPU port.
3332 * Add when first port joins this VLAN and destroy when
3333 * last port leaves this VLAN.
3334 */
3335
3336 for (i = 0; i < rocker->port_count; i++) {
3337 p = rocker->ports[i];
Scott Feldmanbcfd7802015-06-01 11:39:04 -07003338 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
Scott Feldman6c707942014-11-28 14:34:28 +01003339 ref++;
3340 }
3341
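	/* Only act on the first join (adding && ref == 1) or the last
	 * leave (!adding && ref == 0); otherwise the CPU-port group is
	 * already in the desired state.
	 */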
3342 if ((!adding || ref != 1) && (adding || ref != 0))
3343 return 0;
3344
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003345 out_pport = 0;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003346 err = rocker_group_l2_interface(rocker_port, trans, flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003347 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003348 if (err) {
3349 netdev_err(rocker_port->dev,
3350 "Error (%d) port VLAN l2 group for CPU port\n", err);
3351 return err;
3352 }
3353
3354 return 0;
3355}
3356
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003357static struct rocker_ctrl {
3358 const u8 *eth_dst;
3359 const u8 *eth_dst_mask;
Jiri Pirko11e6c652014-12-03 14:14:54 +01003360 __be16 eth_type;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003361 bool acl;
3362 bool bridge;
3363 bool term;
3364 bool copy_to_cpu;
3365} rocker_ctrls[] = {
3366 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3367 /* pass link local multicast pkts up to CPU for filtering */
3368 .eth_dst = ll_mac,
3369 .eth_dst_mask = ll_mask,
3370 .acl = true,
3371 },
3372 [ROCKER_CTRL_LOCAL_ARP] = {
3373 /* pass local ARP pkts up to CPU */
3374 .eth_dst = zero_mac,
3375 .eth_dst_mask = zero_mac,
3376 .eth_type = htons(ETH_P_ARP),
3377 .acl = true,
3378 },
3379 [ROCKER_CTRL_IPV4_MCAST] = {
3380 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3381 .eth_dst = ipv4_mcast,
3382 .eth_dst_mask = ipv4_mask,
3383 .eth_type = htons(ETH_P_IP),
3384 .term = true,
3385 .copy_to_cpu = true,
3386 },
3387 [ROCKER_CTRL_IPV6_MCAST] = {
3388 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3389 .eth_dst = ipv6_mcast,
3390 .eth_dst_mask = ipv6_mask,
3391 .eth_type = htons(ETH_P_IPV6),
3392 .term = true,
3393 .copy_to_cpu = true,
3394 },
3395 [ROCKER_CTRL_DFLT_BRIDGING] = {
3396 /* flood any pkts on vlan */
3397 .bridge = true,
3398 .copy_to_cpu = true,
3399 },
Simon Horman82549732015-07-16 10:39:14 +09003400 [ROCKER_CTRL_DFLT_OVS] = {
3401 /* pass all pkts up to CPU */
3402 .eth_dst = zero_mac,
3403 .eth_dst_mask = zero_mac,
3404 .acl = true,
3405 },
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003406};
3407
3408static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003409 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003410 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003411{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003412 u32 in_pport = rocker_port->pport;
3413 u32 in_pport_mask = 0xffffffff;
3414 u32 out_pport = 0;
Simon Hormane5054642015-05-25 14:28:36 +09003415 const u8 *eth_src = NULL;
3416 const u8 *eth_src_mask = NULL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003417 __be16 vlan_id_mask = htons(0xffff);
3418 u8 ip_proto = 0;
3419 u8 ip_proto_mask = 0;
3420 u8 ip_tos = 0;
3421 u8 ip_tos_mask = 0;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003422 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003423 int err;
3424
Jiri Pirko76c6f942015-09-24 10:02:44 +02003425 err = rocker_flow_tbl_acl(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003426 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003427 eth_src, eth_src_mask,
3428 ctrl->eth_dst, ctrl->eth_dst_mask,
3429 ctrl->eth_type,
3430 vlan_id, vlan_id_mask,
3431 ip_proto, ip_proto_mask,
3432 ip_tos, ip_tos_mask,
3433 group_id);
3434
3435 if (err)
3436 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3437
3438 return err;
3439}
3440
Scott Feldman6c707942014-11-28 14:34:28 +01003441static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003442 struct switchdev_trans *trans,
3443 int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003444 const struct rocker_ctrl *ctrl,
Scott Feldman6c707942014-11-28 14:34:28 +01003445 __be16 vlan_id)
3446{
3447 enum rocker_of_dpa_table_id goto_tbl =
3448 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3449 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3450 u32 tunnel_id = 0;
3451 int err;
3452
3453 if (!rocker_port_is_bridged(rocker_port))
3454 return 0;
3455
Jiri Pirko76c6f942015-09-24 10:02:44 +02003456 err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003457 ctrl->eth_dst, ctrl->eth_dst_mask,
3458 vlan_id, tunnel_id,
3459 goto_tbl, group_id, ctrl->copy_to_cpu);
3460
3461 if (err)
3462 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3463
3464 return err;
3465}
3466
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003467static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003468 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003469 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003470{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003471 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003472 __be16 vlan_id_mask = htons(0xffff);
3473 int err;
3474
3475 if (ntohs(vlan_id) == 0)
3476 vlan_id = rocker_port->internal_vlan_id;
3477
Jiri Pirko76c6f942015-09-24 10:02:44 +02003478 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003479 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003480 ctrl->eth_type, ctrl->eth_dst,
3481 ctrl->eth_dst_mask, vlan_id,
3482 vlan_id_mask, ctrl->copy_to_cpu,
3483 flags);
3484
3485 if (err)
3486 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3487
3488 return err;
3489}
3490
Scott Feldmanc4f20322015-05-10 09:47:50 -07003491static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003492 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003493 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003494{
3495 if (ctrl->acl)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003496 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003497 ctrl, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003498 if (ctrl->bridge)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003499 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003500 ctrl, vlan_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003501
3502 if (ctrl->term)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003503 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003504 ctrl, vlan_id);
3505
3506 return -EOPNOTSUPP;
3507}
3508
3509static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003510 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003511 __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003512{
3513 int err = 0;
3514 int i;
3515
3516 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3517 if (rocker_port->ctrls[i]) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003518 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003519 &rocker_ctrls[i], vlan_id);
3520 if (err)
3521 return err;
3522 }
3523 }
3524
3525 return err;
3526}
3527
Scott Feldmanc4f20322015-05-10 09:47:50 -07003528static int rocker_port_ctrl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003529 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003530 const struct rocker_ctrl *ctrl)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003531{
3532 u16 vid;
3533 int err = 0;
3534
3535 for (vid = 1; vid < VLAN_N_VID; vid++) {
3536 if (!test_bit(vid, rocker_port->vlan_bitmap))
3537 continue;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003538 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003539 ctrl, htons(vid));
3540 if (err)
3541 break;
3542 }
3543
3544 return err;
3545}
3546
Scott Feldmanc4f20322015-05-10 09:47:50 -07003547static int rocker_port_vlan(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003548 struct switchdev_trans *trans, int flags, u16 vid)
Scott Feldman6c707942014-11-28 14:34:28 +01003549{
3550 enum rocker_of_dpa_table_id goto_tbl =
3551 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003552 u32 in_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003553 __be16 vlan_id = htons(vid);
3554 __be16 vlan_id_mask = htons(0xffff);
3555 __be16 internal_vlan_id;
3556 bool untagged;
3557 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3558 int err;
3559
3560 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3561
Scott Feldman9228ad22015-05-10 09:47:54 -07003562 if (adding && test_bit(ntohs(internal_vlan_id),
3563 rocker_port->vlan_bitmap))
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01003564 return 0; /* already added */
Scott Feldman9228ad22015-05-10 09:47:54 -07003565 else if (!adding && !test_bit(ntohs(internal_vlan_id),
3566 rocker_port->vlan_bitmap))
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01003567 return 0; /* already removed */
Scott Feldman6c707942014-11-28 14:34:28 +01003568
Scott Feldman9228ad22015-05-10 09:47:54 -07003569 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3570
Scott Feldman6c707942014-11-28 14:34:28 +01003571 if (adding) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003572 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003573 internal_vlan_id);
3574 if (err) {
3575 netdev_err(rocker_port->dev,
3576 "Error (%d) port ctrl vlan add\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003577 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003578 }
3579 }
3580
Jiri Pirko76c6f942015-09-24 10:02:44 +02003581 err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003582 internal_vlan_id, untagged);
3583 if (err) {
3584 netdev_err(rocker_port->dev,
3585 "Error (%d) port VLAN l2 groups\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003586 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003587 }
3588
Jiri Pirko76c6f942015-09-24 10:02:44 +02003589 err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003590 internal_vlan_id);
3591 if (err) {
3592 netdev_err(rocker_port->dev,
3593 "Error (%d) port VLAN l2 flood group\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003594 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003595 }
3596
Jiri Pirko76c6f942015-09-24 10:02:44 +02003597 err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003598 in_pport, vlan_id, vlan_id_mask,
Scott Feldman6c707942014-11-28 14:34:28 +01003599 goto_tbl, untagged, internal_vlan_id);
3600 if (err)
3601 netdev_err(rocker_port->dev,
3602 "Error (%d) port VLAN table\n", err);
3603
Scott Feldman9228ad22015-05-10 09:47:54 -07003604err_out:
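	/* The change_bit above is undone in the prepare phase so that
	 * neither a successful prepare nor an aborted transaction leaves
	 * the VLAN bitmap modified; only the commit phase keeps it.
	 */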
Jiri Pirko76c6f942015-09-24 10:02:44 +02003605 if (switchdev_trans_ph_prepare(trans))
Scott Feldman9228ad22015-05-10 09:47:54 -07003606 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3607
Scott Feldman6c707942014-11-28 14:34:28 +01003608 return err;
3609}
3610
Scott Feldmanc4f20322015-05-10 09:47:50 -07003611static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003612 struct switchdev_trans *trans, int flags)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003613{
3614 enum rocker_of_dpa_table_id goto_tbl;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003615 u32 in_pport;
3616 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003617 int err;
3618
3619	/* Normal Ethernet frames.  Matches pkts from any local physical
3620	 * port.  Goto VLAN tbl.
3621 */
3622
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003623 in_pport = 0;
3624 in_pport_mask = 0xffff0000;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003625 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3626
Jiri Pirko76c6f942015-09-24 10:02:44 +02003627 err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003628 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003629 goto_tbl);
3630 if (err)
3631 netdev_err(rocker_port->dev,
3632 "Error (%d) ingress port table entry\n", err);
3633
3634 return err;
3635}
3636
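/* FDB learn events arrive from event-ring processing in atomic
 * context, but the switchdev notifier chain is run under rtnl_lock
 * and may sleep, so learned entries are announced to the bridge via
 * a deferred work item.
 */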
Scott Feldman6c707942014-11-28 14:34:28 +01003637struct rocker_fdb_learn_work {
3638 struct work_struct work;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003639 struct rocker_port *rocker_port;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003640 struct switchdev_trans *trans;
Scott Feldman6c707942014-11-28 14:34:28 +01003641 int flags;
3642 u8 addr[ETH_ALEN];
3643 u16 vid;
3644};
3645
3646static void rocker_port_fdb_learn_work(struct work_struct *work)
3647{
Simon Hormane5054642015-05-25 14:28:36 +09003648 const struct rocker_fdb_learn_work *lw =
Scott Feldman6c707942014-11-28 14:34:28 +01003649 container_of(work, struct rocker_fdb_learn_work, work);
3650 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3651 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003652 struct switchdev_notifier_fdb_info info;
Jiri Pirko3aeb6612015-01-15 23:49:37 +01003653
3654 info.addr = lw->addr;
3655 info.vid = lw->vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003656
Ido Schimmel4f2c6ae2016-01-27 15:16:43 +01003657 rtnl_lock();
Thomas Graf51ace882014-11-28 14:34:32 +01003658 if (learned && removing)
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003659 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003660 lw->rocker_port->dev, &info.info);
Thomas Graf51ace882014-11-28 14:34:32 +01003661 else if (learned && !removing)
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003662 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003663 lw->rocker_port->dev, &info.info);
Ido Schimmel4f2c6ae2016-01-27 15:16:43 +01003664 rtnl_unlock();
Scott Feldman6c707942014-11-28 14:34:28 +01003665
Jiri Pirkob15edf82016-02-16 15:14:39 +01003666 rocker_kfree(lw->trans, work);
Scott Feldman6c707942014-11-28 14:34:28 +01003667}
3668
3669static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003670 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003671 const u8 *addr, __be16 vlan_id)
Scott Feldman6c707942014-11-28 14:34:28 +01003672{
3673 struct rocker_fdb_learn_work *lw;
3674 enum rocker_of_dpa_table_id goto_tbl =
3675 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003676 u32 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003677 u32 tunnel_id = 0;
3678 u32 group_id = ROCKER_GROUP_NONE;
Scott Feldman5111f802014-11-28 14:34:30 +01003679 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
Scott Feldman6c707942014-11-28 14:34:28 +01003680 bool copy_to_cpu = false;
3681 int err;
3682
3683 if (rocker_port_is_bridged(rocker_port))
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003684 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003685
3686 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003687 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003688 NULL, vlan_id, tunnel_id, goto_tbl,
Scott Feldman6c707942014-11-28 14:34:28 +01003689 group_id, copy_to_cpu);
3690 if (err)
3691 return err;
3692 }
3693
Scott Feldman5111f802014-11-28 14:34:30 +01003694 if (!syncing)
3695 return 0;
3696
Scott Feldman6c707942014-11-28 14:34:28 +01003697 if (!rocker_port_is_bridged(rocker_port))
3698 return 0;
3699
Jiri Pirkob15edf82016-02-16 15:14:39 +01003700 lw = rocker_kzalloc(trans, flags, sizeof(*lw));
Scott Feldman6c707942014-11-28 14:34:28 +01003701 if (!lw)
3702 return -ENOMEM;
3703
3704 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3705
Scott Feldmanc4f20322015-05-10 09:47:50 -07003706 lw->rocker_port = rocker_port;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003707 lw->trans = trans;
Scott Feldman6c707942014-11-28 14:34:28 +01003708 lw->flags = flags;
3709 ether_addr_copy(lw->addr, addr);
3710 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3711
Jiri Pirko76c6f942015-09-24 10:02:44 +02003712 if (switchdev_trans_ph_prepare(trans))
Jiri Pirkob15edf82016-02-16 15:14:39 +01003713 rocker_kfree(trans, lw);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003714 else
3715 schedule_work(&lw->work);
Scott Feldman6c707942014-11-28 14:34:28 +01003716
3717 return 0;
3718}
3719
3720static struct rocker_fdb_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09003721rocker_fdb_tbl_find(const struct rocker *rocker,
3722 const struct rocker_fdb_tbl_entry *match)
Scott Feldman6c707942014-11-28 14:34:28 +01003723{
3724 struct rocker_fdb_tbl_entry *found;
3725
3726 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3727 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3728 return found;
3729
3730 return NULL;
3731}
3732
3733static int rocker_port_fdb(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003734 struct switchdev_trans *trans,
Scott Feldman6c707942014-11-28 14:34:28 +01003735 const unsigned char *addr,
3736 __be16 vlan_id, int flags)
3737{
3738 struct rocker *rocker = rocker_port->rocker;
3739 struct rocker_fdb_tbl_entry *fdb;
3740 struct rocker_fdb_tbl_entry *found;
3741 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3742 unsigned long lock_flags;
3743
Jiri Pirkob15edf82016-02-16 15:14:39 +01003744 fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
Scott Feldman6c707942014-11-28 14:34:28 +01003745 if (!fdb)
3746 return -ENOMEM;
3747
3748 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
Scott Feldmana471be42015-09-23 08:39:14 -07003749 fdb->touched = jiffies;
Scott Feldman4c660492015-09-23 08:39:15 -07003750 fdb->key.rocker_port = rocker_port;
Scott Feldman6c707942014-11-28 14:34:28 +01003751 ether_addr_copy(fdb->key.addr, addr);
3752 fdb->key.vlan_id = vlan_id;
3753 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3754
3755 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3756
3757 found = rocker_fdb_tbl_find(rocker, fdb);
3758
Scott Feldmana471be42015-09-23 08:39:14 -07003759 if (found) {
3760 found->touched = jiffies;
3761 if (removing) {
Jiri Pirkob15edf82016-02-16 15:14:39 +01003762 rocker_kfree(trans, fdb);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003763 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmana471be42015-09-23 08:39:14 -07003764 hash_del(&found->entry);
3765 }
3766 } else if (!removing) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003767 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmana471be42015-09-23 08:39:14 -07003768 hash_add(rocker->fdb_tbl, &fdb->entry,
3769 fdb->key_crc32);
Scott Feldman6c707942014-11-28 14:34:28 +01003770 }
3771
3772 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3773
3774	/* Check if adding an entry that already exists, or removing one not found */
3775 if (!found != !removing) {
Jiri Pirkob15edf82016-02-16 15:14:39 +01003776 rocker_kfree(trans, fdb);
Scott Feldman6c707942014-11-28 14:34:28 +01003777 if (!found && removing)
3778 return 0;
3779 /* Refreshing existing to update aging timers */
3780 flags |= ROCKER_OP_FLAG_REFRESH;
3781 }
3782
Jiri Pirko76c6f942015-09-24 10:02:44 +02003783 return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003784}
3785
Scott Feldmanc4f20322015-05-10 09:47:50 -07003786static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003787 struct switchdev_trans *trans, int flags)
Scott Feldman6c707942014-11-28 14:34:28 +01003788{
3789 struct rocker *rocker = rocker_port->rocker;
3790 struct rocker_fdb_tbl_entry *found;
3791 unsigned long lock_flags;
Scott Feldman6c707942014-11-28 14:34:28 +01003792 struct hlist_node *tmp;
3793 int bkt;
3794 int err = 0;
3795
3796 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3797 rocker_port->stp_state == BR_STATE_FORWARDING)
3798 return 0;
3799
Jiri Pirkod33eeb62015-10-14 19:40:54 +02003800 flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
Scott Feldman179f9a22015-06-12 21:35:46 -07003801
Scott Feldman6c707942014-11-28 14:34:28 +01003802 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3803
3804 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4c660492015-09-23 08:39:15 -07003805 if (found->key.rocker_port != rocker_port)
Scott Feldman6c707942014-11-28 14:34:28 +01003806 continue;
3807 if (!found->learned)
3808 continue;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003809 err = rocker_port_fdb_learn(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003810 found->key.addr,
3811 found->key.vlan_id);
3812 if (err)
3813 goto err_out;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003814 if (!switchdev_trans_ph_prepare(trans))
Simon Horman3098ac32015-05-21 12:40:14 +09003815 hash_del(&found->entry);
Scott Feldman6c707942014-11-28 14:34:28 +01003816 }
3817
3818err_out:
3819 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3820
3821 return err;
3822}
3823
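/* Periodic FDB ageing: drop learned entries whose last-touched time
 * plus the owning port's ageing_time has passed, and re-arm the
 * timer for the earliest remaining expiry (defaulting to
 * BR_MIN_AGEING_TIME ahead when nothing expires sooner).
 */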
Scott Feldman52fe3e22015-09-23 08:39:18 -07003824static void rocker_fdb_cleanup(unsigned long data)
3825{
3826 struct rocker *rocker = (struct rocker *)data;
3827 struct rocker_port *rocker_port;
3828 struct rocker_fdb_tbl_entry *entry;
3829 struct hlist_node *tmp;
3830 unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
3831 unsigned long expires;
3832 unsigned long lock_flags;
3833 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3834 ROCKER_OP_FLAG_LEARNED;
3835 int bkt;
3836
3837 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3838
3839 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3840 if (!entry->learned)
3841 continue;
3842 rocker_port = entry->key.rocker_port;
3843 expires = entry->touched + rocker_port->ageing_time;
3844 if (time_before_eq(expires, jiffies)) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003845 rocker_port_fdb_learn(rocker_port, NULL,
Scott Feldman52fe3e22015-09-23 08:39:18 -07003846 flags, entry->key.addr,
3847 entry->key.vlan_id);
3848 hash_del(&entry->entry);
3849 } else if (time_before(expires, next_timer)) {
3850 next_timer = expires;
3851 }
3852 }
3853
3854 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3855
3856 mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
3857}
3858
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003859static int rocker_port_router_mac(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003860 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003861 __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003862{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003863 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003864 __be16 eth_type;
3865 const u8 *dst_mac_mask = ff_mac;
3866 __be16 vlan_id_mask = htons(0xffff);
3867 bool copy_to_cpu = false;
3868 int err;
3869
3870 if (ntohs(vlan_id) == 0)
3871 vlan_id = rocker_port->internal_vlan_id;
3872
3873 eth_type = htons(ETH_P_IP);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003874 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003875 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003876 eth_type, rocker_port->dev->dev_addr,
3877 dst_mac_mask, vlan_id, vlan_id_mask,
3878 copy_to_cpu, flags);
3879 if (err)
3880 return err;
3881
3882 eth_type = htons(ETH_P_IPV6);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003883 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003884 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003885 eth_type, rocker_port->dev->dev_addr,
3886 dst_mac_mask, vlan_id, vlan_id_mask,
3887 copy_to_cpu, flags);
3888
3889 return err;
3890}
3891
Scott Feldmanc4f20322015-05-10 09:47:50 -07003892static int rocker_port_fwding(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003893 struct switchdev_trans *trans, int flags)
Scott Feldman6c707942014-11-28 14:34:28 +01003894{
3895 bool pop_vlan;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003896 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003897 __be16 vlan_id;
3898 u16 vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003899 int err;
3900
3901 /* Port will be forwarding-enabled if its STP state is LEARNING
3902 * or FORWARDING. Traffic from CPU can still egress, regardless of
3903 * port STP state. Use L2 interface group on port VLANs as a way
3904 * to toggle port forwarding: if forwarding is disabled, L2
3905 * interface group will not exist.
3906 */
3907
3908 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3909 rocker_port->stp_state != BR_STATE_FORWARDING)
3910 flags |= ROCKER_OP_FLAG_REMOVE;
3911
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003912 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003913 for (vid = 1; vid < VLAN_N_VID; vid++) {
3914 if (!test_bit(vid, rocker_port->vlan_bitmap))
3915 continue;
3916 vlan_id = htons(vid);
3917 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003918 err = rocker_group_l2_interface(rocker_port, trans, flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003919 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003920 if (err) {
3921 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003922 "Error (%d) port VLAN l2 group for pport %d\n",
3923 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003924 return err;
3925 }
3926 }
3927
3928 return 0;
3929}
3930
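/* Reconcile the control-traffic entries with the new STP state,
 * flush learned FDB entries and update the forwarding groups.  In
 * the prepare phase the cached ctrl flags and STP state are restored
 * on the way out, since only the commit phase may leave them
 * changed.
 */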
Scott Feldmanc4f20322015-05-10 09:47:50 -07003931static int rocker_port_stp_update(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003932 struct switchdev_trans *trans, int flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07003933 u8 state)
Scott Feldman6c707942014-11-28 14:34:28 +01003934{
3935 bool want[ROCKER_CTRL_MAX] = { 0, };
Scott Feldmanc4f20322015-05-10 09:47:50 -07003936 bool prev_ctrls[ROCKER_CTRL_MAX];
Jiri Pirko76c6f942015-09-24 10:02:44 +02003937 u8 uninitialized_var(prev_state);
Scott Feldman6c707942014-11-28 14:34:28 +01003938 int err;
3939 int i;
3940
Jiri Pirko76c6f942015-09-24 10:02:44 +02003941 if (switchdev_trans_ph_prepare(trans)) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003942 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3943 prev_state = rocker_port->stp_state;
3944 }
3945
Scott Feldman6c707942014-11-28 14:34:28 +01003946 if (rocker_port->stp_state == state)
3947 return 0;
3948
3949 rocker_port->stp_state = state;
3950
3951 switch (state) {
3952 case BR_STATE_DISABLED:
3953 /* port is completely disabled */
3954 break;
3955 case BR_STATE_LISTENING:
3956 case BR_STATE_BLOCKING:
3957 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3958 break;
3959 case BR_STATE_LEARNING:
3960 case BR_STATE_FORWARDING:
Simon Horman82549732015-07-16 10:39:14 +09003961 if (!rocker_port_is_ovsed(rocker_port))
3962 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
Scott Feldman6c707942014-11-28 14:34:28 +01003963 want[ROCKER_CTRL_IPV4_MCAST] = true;
3964 want[ROCKER_CTRL_IPV6_MCAST] = true;
3965 if (rocker_port_is_bridged(rocker_port))
3966 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
Simon Horman82549732015-07-16 10:39:14 +09003967 else if (rocker_port_is_ovsed(rocker_port))
3968 want[ROCKER_CTRL_DFLT_OVS] = true;
Scott Feldman6c707942014-11-28 14:34:28 +01003969 else
3970 want[ROCKER_CTRL_LOCAL_ARP] = true;
3971 break;
3972 }
3973
3974 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3975 if (want[i] != rocker_port->ctrls[i]) {
Scott Feldman179f9a22015-06-12 21:35:46 -07003976 int ctrl_flags = flags |
3977 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003978 err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003979 &rocker_ctrls[i]);
3980 if (err)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003981 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003982 rocker_port->ctrls[i] = want[i];
3983 }
3984 }
3985
Jiri Pirko76c6f942015-09-24 10:02:44 +02003986 err = rocker_port_fdb_flush(rocker_port, trans, flags);
Scott Feldman6c707942014-11-28 14:34:28 +01003987 if (err)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003988 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003989
Jiri Pirko76c6f942015-09-24 10:02:44 +02003990 err = rocker_port_fwding(rocker_port, trans, flags);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003991
3992err_out:
Jiri Pirko76c6f942015-09-24 10:02:44 +02003993 if (switchdev_trans_ph_prepare(trans)) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003994 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3995 rocker_port->stp_state = prev_state;
3996 }
3997
3998 return err;
Scott Feldman6c707942014-11-28 14:34:28 +01003999}
4000
Scott Feldmanc4f20322015-05-10 09:47:50 -07004001static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004002 struct switchdev_trans *trans, int flags)
Scott Feldmane47172a2015-02-25 20:15:38 -08004003{
4004 if (rocker_port_is_bridged(rocker_port))
4005 /* bridge STP will enable port */
4006 return 0;
4007
4008 /* port is not bridged, so simulate going to FORWARDING state */
Jiri Pirko76c6f942015-09-24 10:02:44 +02004009 return rocker_port_stp_update(rocker_port, trans, flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07004010 BR_STATE_FORWARDING);
Scott Feldmane47172a2015-02-25 20:15:38 -08004011}
4012
Scott Feldmanc4f20322015-05-10 09:47:50 -07004013static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004014 struct switchdev_trans *trans, int flags)
Scott Feldmane47172a2015-02-25 20:15:38 -08004015{
4016 if (rocker_port_is_bridged(rocker_port))
4017 /* bridge STP will disable port */
4018 return 0;
4019
4020 /* port is not bridged, so simulate going to DISABLED state */
Jiri Pirko76c6f942015-09-24 10:02:44 +02004021 return rocker_port_stp_update(rocker_port, trans, flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07004022 BR_STATE_DISABLED);
Scott Feldmane47172a2015-02-25 20:15:38 -08004023}
4024
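/* Untagged traffic is carried on a per-ifindex "internal" VLAN.
 * IDs are handed out from a bitmap based at
 * ROCKER_INTERNAL_VLAN_ID_BASE and are reference counted, so callers
 * that pass the same ifindex (e.g. ports enslaved to one bridge,
 * keyed by the bridge master's ifindex) share a single internal
 * VLAN ID.
 */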
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004025static struct rocker_internal_vlan_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09004026rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004027{
4028 struct rocker_internal_vlan_tbl_entry *found;
4029
4030 hash_for_each_possible(rocker->internal_vlan_tbl, found,
4031 entry, ifindex) {
4032 if (found->ifindex == ifindex)
4033 return found;
4034 }
4035
4036 return NULL;
4037}
4038
4039static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
4040 int ifindex)
4041{
4042 struct rocker *rocker = rocker_port->rocker;
4043 struct rocker_internal_vlan_tbl_entry *entry;
4044 struct rocker_internal_vlan_tbl_entry *found;
4045 unsigned long lock_flags;
4046 int i;
4047
Simon Hormandf6a2062015-05-21 12:40:17 +09004048 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004049 if (!entry)
4050 return 0;
4051
4052 entry->ifindex = ifindex;
4053
4054 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4055
4056 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4057 if (found) {
Simon Hormandf6a2062015-05-21 12:40:17 +09004058 kfree(entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004059 goto found;
4060 }
4061
4062 found = entry;
4063 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
4064
4065 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
4066 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
4067 continue;
4068 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
4069 goto found;
4070 }
4071
4072 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
4073
4074found:
4075 found->ref_count++;
4076 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4077
4078 return found->vlan_id;
4079}
4080
Simon Hormane5054642015-05-25 14:28:36 +09004081static void
4082rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
4083 int ifindex)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004084{
4085 struct rocker *rocker = rocker_port->rocker;
4086 struct rocker_internal_vlan_tbl_entry *found;
4087 unsigned long lock_flags;
4088 unsigned long bit;
4089
4090 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4091
4092 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4093 if (!found) {
4094 netdev_err(rocker_port->dev,
4095 "ifindex (%d) not found in internal VLAN tbl\n",
4096 ifindex);
4097 goto not_found;
4098 }
4099
4100 if (--found->ref_count <= 0) {
4101 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
4102 clear_bit(bit, rocker->internal_vlan_bitmap);
4103 hash_del(&found->entry);
Simon Hormandf6a2062015-05-21 12:40:17 +09004104 kfree(found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004105 }
4106
4107not_found:
4108 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4109}
4110
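/* Install or remove an IPv4 route.  When the nexthop has a gateway
 * reachable through this port, the route points at that nexthop's
 * L3 unicast group; otherwise matching packets are trapped to the
 * CPU port (L2 interface group 0 on the internal VLAN) for software
 * forwarding.
 */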
Scott Feldmanc4f20322015-05-10 09:47:50 -07004111static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004112 struct switchdev_trans *trans, __be32 dst,
Simon Hormane5054642015-05-25 14:28:36 +09004113 int dst_len, const struct fib_info *fi,
4114 u32 tb_id, int flags)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08004115{
Simon Hormane5054642015-05-25 14:28:36 +09004116 const struct fib_nh *nh;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08004117 __be16 eth_type = htons(ETH_P_IP);
4118 __be32 dst_mask = inet_make_mask(dst_len);
4119 __be16 internal_vlan_id = rocker_port->internal_vlan_id;
4120 u32 priority = fi->fib_priority;
4121 enum rocker_of_dpa_table_id goto_tbl =
4122 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
4123 u32 group_id;
4124 bool nh_on_port;
4125 bool has_gw;
4126 u32 index;
4127 int err;
4128
4129 /* XXX support ECMP */
4130
4131 nh = fi->fib_nh;
4132 nh_on_port = (fi->fib_dev == rocker_port->dev);
4133 has_gw = !!nh->nh_gw;
4134
4135 if (has_gw && nh_on_port) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02004136 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08004137 nh->nh_gw, &index);
4138 if (err)
4139 return err;
4140
4141 group_id = ROCKER_GROUP_L3_UNICAST(index);
4142 } else {
4143 /* Send to CPU for processing */
4144 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
4145 }
4146
Jiri Pirko76c6f942015-09-24 10:02:44 +02004147 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08004148 dst_mask, priority, goto_tbl,
4149 group_id, flags);
4150 if (err)
4151 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
4152 err, &dst);
4153
4154 return err;
4155}
4156
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004157/*****************
4158 * Net device ops
4159 *****************/
4160
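/* Bring-up order in ndo_open matters: DMA rings first, then the
 * per-port tx/rx MSI-X handlers, then the world (OF-DPA) port hook,
 * and only then forwarding and NAPI. The port is left disabled while
 * proto_down is set so it can come up administratively without
 * passing traffic; rocker_port_stop() unwinds in roughly the reverse
 * order.
 */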
4161static int rocker_port_open(struct net_device *dev)
4162{
4163 struct rocker_port *rocker_port = netdev_priv(dev);
4164 int err;
4165
4166 err = rocker_port_dma_rings_init(rocker_port);
4167 if (err)
4168 return err;
4169
4170 err = request_irq(rocker_msix_tx_vector(rocker_port),
4171 rocker_tx_irq_handler, 0,
4172 rocker_driver_name, rocker_port);
4173 if (err) {
4174 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
4175 goto err_request_tx_irq;
4176 }
4177
4178 err = request_irq(rocker_msix_rx_vector(rocker_port),
4179 rocker_rx_irq_handler, 0,
4180 rocker_driver_name, rocker_port);
4181 if (err) {
4182 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
4183 goto err_request_rx_irq;
4184 }
4185
Jiri Pirkoe4201142016-02-16 15:14:45 +01004186 err = rocker_world_port_open(rocker_port);
4187 if (err) {
4188 netdev_err(rocker_port->dev, "cannot open port in world\n");
4189 goto err_world_port_open;
4190 }
4191
Jiri Pirko76c6f942015-09-24 10:02:44 +02004192 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01004193 if (err)
Scott Feldmane47172a2015-02-25 20:15:38 -08004194 goto err_fwd_enable;
Scott Feldman6c707942014-11-28 14:34:28 +01004195
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004196 napi_enable(&rocker_port->napi_tx);
4197 napi_enable(&rocker_port->napi_rx);
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004198 if (!dev->proto_down)
4199 rocker_port_set_enable(rocker_port, true);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004200 netif_start_queue(dev);
4201 return 0;
4202
Scott Feldmane47172a2015-02-25 20:15:38 -08004203err_fwd_enable:
Jiri Pirkoe4201142016-02-16 15:14:45 +01004204err_world_port_open:
Scott Feldman6c707942014-11-28 14:34:28 +01004205 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004206err_request_rx_irq:
4207 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4208err_request_tx_irq:
4209 rocker_port_dma_rings_fini(rocker_port);
4210 return err;
4211}
4212
4213static int rocker_port_stop(struct net_device *dev)
4214{
4215 struct rocker_port *rocker_port = netdev_priv(dev);
4216
4217 netif_stop_queue(dev);
4218 rocker_port_set_enable(rocker_port, false);
4219 napi_disable(&rocker_port->napi_rx);
4220 napi_disable(&rocker_port->napi_tx);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004221 rocker_world_port_stop(rocker_port);
Jiri Pirko76c6f942015-09-24 10:02:44 +02004222 rocker_port_fwd_disable(rocker_port, NULL,
Scott Feldmanf66feaa2015-06-12 21:35:50 -07004223 ROCKER_OP_FLAG_NOWAIT);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004224 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4225 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4226 rocker_port_dma_rings_fini(rocker_port);
4227
4228 return 0;
4229}
4230
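/* Tx descriptors carry the packet as a TLV tree rather than a fixed
 * layout; roughly:
 *
 *   ROCKER_TLV_TX_FRAGS (nest)
 *     ROCKER_TLV_TX_FRAG (nest, one per fragment)
 *       ROCKER_TLV_TX_FRAG_ATTR_ADDR (u64 DMA address)
 *       ROCKER_TLV_TX_FRAG_ATTR_LEN  (u16 length)
 *
 * rocker_port_xmit() maps the skb head plus each page frag this way,
 * linearizing first if nr_frags exceeds ROCKER_TX_FRAGS_MAX, and the
 * unmap helper below walks the same tree to release the streaming DMA
 * mappings once the device is done with the descriptor.
 */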
Simon Hormane5054642015-05-25 14:28:36 +09004231static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4232 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004233{
Simon Hormane5054642015-05-25 14:28:36 +09004234 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004235 struct pci_dev *pdev = rocker->pdev;
Simon Hormane5054642015-05-25 14:28:36 +09004236 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004237 struct rocker_tlv *attr;
4238 int rem;
4239
4240 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4241 if (!attrs[ROCKER_TLV_TX_FRAGS])
4242 return;
4243 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
Simon Hormane5054642015-05-25 14:28:36 +09004244 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004245 dma_addr_t dma_handle;
4246 size_t len;
4247
4248 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4249 continue;
4250 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
4251 attr);
4252 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4253 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4254 continue;
4255 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4256 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4257 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
4258 }
4259}
4260
Simon Hormane5054642015-05-25 14:28:36 +09004261static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004262 struct rocker_desc_info *desc_info,
4263 char *buf, size_t buf_len)
4264{
Simon Hormane5054642015-05-25 14:28:36 +09004265 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004266 struct pci_dev *pdev = rocker->pdev;
4267 dma_addr_t dma_handle;
4268 struct rocker_tlv *frag;
4269
4270 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
4271 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
4272 if (net_ratelimit())
4273 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
4274 return -EIO;
4275 }
4276 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
4277 if (!frag)
4278 goto unmap_frag;
4279 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
4280 dma_handle))
4281 goto nest_cancel;
4282 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
4283 buf_len))
4284 goto nest_cancel;
4285 rocker_tlv_nest_end(desc_info, frag);
4286 return 0;
4287
4288nest_cancel:
4289 rocker_tlv_nest_cancel(desc_info, frag);
4290unmap_frag:
4291 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
4292 return -EMSGSIZE;
4293}
4294
4295static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
4296{
4297 struct rocker_port *rocker_port = netdev_priv(dev);
4298 struct rocker *rocker = rocker_port->rocker;
4299 struct rocker_desc_info *desc_info;
4300 struct rocker_tlv *frags;
4301 int i;
4302 int err;
4303
4304 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4305 if (unlikely(!desc_info)) {
4306 if (net_ratelimit())
4307 netdev_err(dev, "tx ring full when queue awake\n");
4308 return NETDEV_TX_BUSY;
4309 }
4310
4311 rocker_desc_cookie_ptr_set(desc_info, skb);
4312
4313 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
4314 if (!frags)
4315 goto out;
4316 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4317 skb->data, skb_headlen(skb));
4318 if (err)
4319 goto nest_cancel;
Jiri Pirko95b9be62015-08-02 20:56:38 +02004320 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
4321 err = skb_linearize(skb);
4322 if (err)
4323 goto unmap_frags;
4324 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004325
4326 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4327 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4328
4329 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4330 skb_frag_address(frag),
4331 skb_frag_size(frag));
4332 if (err)
4333 goto unmap_frags;
4334 }
4335 rocker_tlv_nest_end(desc_info, frags);
4336
4337 rocker_desc_gen_clear(desc_info);
4338 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
4339
4340 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4341 if (!desc_info)
4342 netif_stop_queue(dev);
4343
4344 return NETDEV_TX_OK;
4345
4346unmap_frags:
4347 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4348nest_cancel:
4349 rocker_tlv_nest_cancel(desc_info, frags);
4350out:
4351 dev_kfree_skb(skb);
David Ahernf2bbca52015-01-16 14:22:29 -07004352 dev->stats.tx_dropped++;
4353
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004354 return NETDEV_TX_OK;
4355}
4356
4357static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4358{
4359 struct sockaddr *addr = p;
4360 struct rocker_port *rocker_port = netdev_priv(dev);
4361 int err;
4362
4363 if (!is_valid_ether_addr(addr->sa_data))
4364 return -EADDRNOTAVAIL;
4365
4366 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4367 if (err)
4368 return err;
4369 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4370 return 0;
4371}
4372
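/* The 68..9000 byte bounds below mirror what the emulated hardware
 * accepts. A running port is stopped and re-opened around the MTU
 * command, presumably because rx buffers are sized when the DMA rings
 * are (re)initialized, so a live ring could not absorb a larger MTU.
 */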
Scott Feldman77a58c72015-07-08 16:06:47 -07004373static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4374{
4375 struct rocker_port *rocker_port = netdev_priv(dev);
4376 int running = netif_running(dev);
4377 int err;
4378
4379#define ROCKER_PORT_MIN_MTU 68
4380#define ROCKER_PORT_MAX_MTU 9000
4381
4382 if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4383 return -EINVAL;
4384
4385 if (running)
4386 rocker_port_stop(dev);
4387
4388 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4389 dev->mtu = new_mtu;
4390
4391 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4392 if (err)
4393 return err;
4394
4395 if (running)
4396 err = rocker_port_open(dev);
4397
4398 return err;
4399}
4400
David Aherndb191702015-03-17 20:23:16 -06004401static int rocker_port_get_phys_port_name(struct net_device *dev,
4402 char *buf, size_t len)
4403{
4404 struct rocker_port *rocker_port = netdev_priv(dev);
4405 struct port_name name = { .buf = buf, .len = len };
4406 int err;
4407
Jiri Pirko76c6f942015-09-24 10:02:44 +02004408 err = rocker_cmd_exec(rocker_port, NULL, 0,
David Aherndb191702015-03-17 20:23:16 -06004409 rocker_cmd_get_port_settings_prep, NULL,
4410 rocker_cmd_get_port_settings_phys_name_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07004411 &name);
David Aherndb191702015-03-17 20:23:16 -06004412
4413 return err ? -EOPNOTSUPP : 0;
4414}
4415
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004416static int rocker_port_change_proto_down(struct net_device *dev,
4417 bool proto_down)
4418{
4419 struct rocker_port *rocker_port = netdev_priv(dev);
4420
4421 if (rocker_port->dev->flags & IFF_UP)
4422 rocker_port_set_enable(rocker_port, !proto_down);
4423 rocker_port->dev->proto_down = proto_down;
4424 return 0;
4425}
4426
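/* Called when the kernel is about to free a neighbour entry. Drop the
 * offloaded copy so the device stops using a stale lladdr; the
 * ROCKER_OP_FLAG_NOWAIT is there because this destructor may run in a
 * context where sleeping on the cmd ring is not an option.
 */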
Scott Feldmandd19f832015-08-12 18:45:25 -07004427static void rocker_port_neigh_destroy(struct neighbour *n)
4428{
4429 struct rocker_port *rocker_port = netdev_priv(n->dev);
4430 int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4431 __be32 ip_addr = *(__be32 *)n->primary_key;
Jiri Pirkoe4201142016-02-16 15:14:45 +01004432 int err;
Scott Feldmandd19f832015-08-12 18:45:25 -07004433
Jiri Pirko76c6f942015-09-24 10:02:44 +02004434 rocker_port_ipv4_neigh(rocker_port, NULL,
Scott Feldmandd19f832015-08-12 18:45:25 -07004435 flags, ip_addr, n->ha);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004436 err = rocker_world_port_neigh_destroy(rocker_port, n);
4437 if (err)
4438 netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
4439 err);
Scott Feldmandd19f832015-08-12 18:45:25 -07004440}
4441
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004442static const struct net_device_ops rocker_port_netdev_ops = {
4443 .ndo_open = rocker_port_open,
4444 .ndo_stop = rocker_port_stop,
4445 .ndo_start_xmit = rocker_port_xmit,
4446 .ndo_set_mac_address = rocker_port_set_mac_address,
Scott Feldman77a58c72015-07-08 16:06:47 -07004447 .ndo_change_mtu = rocker_port_change_mtu,
Scott Feldman85fdb952015-05-10 09:48:05 -07004448 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
Scott Feldmanfc8f40d2015-05-10 09:47:57 -07004449 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
Scott Feldman54ba5a02015-05-10 09:48:01 -07004450 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004451 .ndo_fdb_add = switchdev_port_fdb_add,
4452 .ndo_fdb_del = switchdev_port_fdb_del,
4453 .ndo_fdb_dump = switchdev_port_fdb_dump,
David Aherndb191702015-03-17 20:23:16 -06004454 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004455 .ndo_change_proto_down = rocker_port_change_proto_down,
Scott Feldmandd19f832015-08-12 18:45:25 -07004456 .ndo_neigh_destroy = rocker_port_neigh_destroy,
Scott Feldman98237d42015-03-15 21:07:15 -07004457};
4458
4459/********************
4460 * switchdev interface
4461 ********************/
4462
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004463static int rocker_port_attr_get(struct net_device *dev,
4464 struct switchdev_attr *attr)
Scott Feldman98237d42015-03-15 21:07:15 -07004465{
Simon Hormane5054642015-05-25 14:28:36 +09004466 const struct rocker_port *rocker_port = netdev_priv(dev);
4467 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirkoe4201142016-02-16 15:14:45 +01004468 int err = 0;
Scott Feldman98237d42015-03-15 21:07:15 -07004469
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004470 switch (attr->id) {
Jiri Pirko1f868392015-10-01 11:03:42 +02004471 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
Scott Feldman42275bd2015-05-13 11:16:50 -07004472 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4473 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004474 break;
Jiri Pirko1f868392015-10-01 11:03:42 +02004475 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
Scott Feldman42275bd2015-05-13 11:16:50 -07004476 attr->u.brport_flags = rocker_port->brport_flags;
Jiri Pirkoe4201142016-02-16 15:14:45 +01004477 err = rocker_world_port_attr_bridge_flags_get(rocker_port,
4478 &attr->u.brport_flags);
Scott Feldman6004c862015-05-10 09:47:55 -07004479 break;
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004480 default:
4481 return -EOPNOTSUPP;
4482 }
4483
Jiri Pirkoe4201142016-02-16 15:14:45 +01004484 return err;
Scott Feldman98237d42015-03-15 21:07:15 -07004485}
4486
Scott Feldman6004c862015-05-10 09:47:55 -07004487static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004488 struct switchdev_trans *trans,
Scott Feldman6004c862015-05-10 09:47:55 -07004489 unsigned long brport_flags)
4490{
4491 unsigned long orig_flags;
4492 int err = 0;
4493
4494 orig_flags = rocker_port->brport_flags;
4495 rocker_port->brport_flags = brport_flags;
4496 if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01004497 err = rocker_port_set_learning(rocker_port, trans,
4498 !!(rocker_port->brport_flags & BR_LEARNING));
Scott Feldman6004c862015-05-10 09:47:55 -07004499
Jiri Pirko76c6f942015-09-24 10:02:44 +02004500 if (switchdev_trans_ph_prepare(trans))
Scott Feldman6004c862015-05-10 09:47:55 -07004501 rocker_port->brport_flags = orig_flags;
4502
4503 return err;
4504}
4505
Scott Feldmand0cf57f2015-10-08 19:23:20 -07004506static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
4507 struct switchdev_trans *trans,
4508 u32 ageing_time)
4509{
4510 if (!switchdev_trans_ph_prepare(trans)) {
4511 rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
4512 mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
4513 }
4514
4515 return 0;
4516}
4517
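/* switchdev attr/obj writes arrive in two phases: a prepare pass
 * (switchdev_trans_ph_prepare(trans) is true) followed by a commit.
 * A prepare pass must not leave state behind; see
 * rocker_port_brport_flags_set() above, which restores the original
 * flags when it detects prepare, so a failed transaction aborts
 * cleanly.
 */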
Scott Feldmanc4f20322015-05-10 09:47:50 -07004518static int rocker_port_attr_set(struct net_device *dev,
Jiri Pirkof7fadf32015-10-14 19:40:49 +02004519 const struct switchdev_attr *attr,
Jiri Pirko7ea6eb32015-09-24 10:02:41 +02004520 struct switchdev_trans *trans)
Scott Feldmanc4f20322015-05-10 09:47:50 -07004521{
4522 struct rocker_port *rocker_port = netdev_priv(dev);
4523 int err = 0;
4524
Scott Feldmanc4f20322015-05-10 09:47:50 -07004525 switch (attr->id) {
Jiri Pirko1f868392015-10-01 11:03:42 +02004526 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
Jiri Pirkod33eeb62015-10-14 19:40:54 +02004527 err = rocker_port_stp_update(rocker_port, trans, 0,
Scott Feldman42275bd2015-05-13 11:16:50 -07004528 attr->u.stp_state);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004529 if (err)
4530 break;
4531 err = rocker_world_port_attr_stp_state_set(rocker_port,
4532 attr->u.stp_state,
4533 trans);
Scott Feldman35636062015-05-10 09:47:51 -07004534 break;
Jiri Pirko1f868392015-10-01 11:03:42 +02004535 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
Jiri Pirko76c6f942015-09-24 10:02:44 +02004536 err = rocker_port_brport_flags_set(rocker_port, trans,
Scott Feldman42275bd2015-05-13 11:16:50 -07004537 attr->u.brport_flags);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004538 if (err)
4539 break;
4540 err = rocker_world_port_attr_bridge_flags_set(rocker_port,
4541 attr->u.brport_flags,
4542 trans);
Scott Feldman6004c862015-05-10 09:47:55 -07004543 break;
Scott Feldmand0cf57f2015-10-08 19:23:20 -07004544 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
4545 err = rocker_port_bridge_ageing_time(rocker_port, trans,
4546 attr->u.ageing_time);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004547 if (err)
4548 break;
4549 err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
4550 attr->u.ageing_time,
4551 trans);
Scott Feldmand0cf57f2015-10-08 19:23:20 -07004552 break;
Scott Feldmanc4f20322015-05-10 09:47:50 -07004553 default:
4554 err = -EOPNOTSUPP;
4555 break;
4556 }
4557
4558 return err;
Scott Feldman98237d42015-03-15 21:07:15 -07004559}
4560
Scott Feldman9228ad22015-05-10 09:47:54 -07004561static int rocker_port_vlan_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004562 struct switchdev_trans *trans,
4563 u16 vid, u16 flags)
Scott Feldman9228ad22015-05-10 09:47:54 -07004564{
4565 int err;
4566
4567 /* XXX deal with flags for PVID and untagged */
4568
Jiri Pirko76c6f942015-09-24 10:02:44 +02004569 err = rocker_port_vlan(rocker_port, trans, 0, vid);
Scott Feldman9228ad22015-05-10 09:47:54 -07004570 if (err)
4571 return err;
4572
Jiri Pirko76c6f942015-09-24 10:02:44 +02004573 err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
Scott Feldmancec04a62015-06-01 11:39:03 -07004574 if (err)
Jiri Pirko76c6f942015-09-24 10:02:44 +02004575 rocker_port_vlan(rocker_port, trans,
Scott Feldmancec04a62015-06-01 11:39:03 -07004576 ROCKER_OP_FLAG_REMOVE, vid);
4577
4578 return err;
Scott Feldman9228ad22015-05-10 09:47:54 -07004579}
4580
4581static int rocker_port_vlans_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004582 struct switchdev_trans *trans,
Jiri Pirko8f24f302015-10-01 11:03:43 +02004583 const struct switchdev_obj_port_vlan *vlan)
Scott Feldman9228ad22015-05-10 09:47:54 -07004584{
4585 u16 vid;
4586 int err;
4587
Scott Feldman3e3a78b2015-06-22 00:27:16 -07004588 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02004589 err = rocker_port_vlan_add(rocker_port, trans,
Scott Feldman9228ad22015-05-10 09:47:54 -07004590 vid, vlan->flags);
4591 if (err)
4592 return err;
4593 }
4594
4595 return 0;
4596}
4597
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004598static int rocker_port_fdb_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004599 struct switchdev_trans *trans,
Jiri Pirko52ba57c2015-10-01 11:03:44 +02004600 const struct switchdev_obj_port_fdb *fdb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004601{
4602 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4603 int flags = 0;
4604
4605 if (!rocker_port_is_bridged(rocker_port))
4606 return -EINVAL;
4607
Jiri Pirko76c6f942015-09-24 10:02:44 +02004608 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004609}
4610
Scott Feldman9228ad22015-05-10 09:47:54 -07004611static int rocker_port_obj_add(struct net_device *dev,
Jiri Pirko648b4a92015-10-01 11:03:45 +02004612 const struct switchdev_obj *obj,
Jiri Pirko7ea6eb32015-09-24 10:02:41 +02004613 struct switchdev_trans *trans)
Scott Feldman9228ad22015-05-10 09:47:54 -07004614{
4615 struct rocker_port *rocker_port = netdev_priv(dev);
Simon Hormane5054642015-05-25 14:28:36 +09004616 const struct switchdev_obj_ipv4_fib *fib4;
Scott Feldman9228ad22015-05-10 09:47:54 -07004617 int err = 0;
4618
Jiri Pirko9e8f4a52015-10-01 11:03:46 +02004619 switch (obj->id) {
Jiri Pirko57d80832015-10-01 11:03:41 +02004620 case SWITCHDEV_OBJ_ID_PORT_VLAN:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004621 err = rocker_port_vlans_add(rocker_port, trans,
4622 SWITCHDEV_OBJ_PORT_VLAN(obj));
Jiri Pirkoe4201142016-02-16 15:14:45 +01004623 if (err)
4624 break;
4625 err = rocker_world_port_obj_vlan_add(rocker_port,
4626 SWITCHDEV_OBJ_PORT_VLAN(obj),
4627 trans);
Scott Feldman9228ad22015-05-10 09:47:54 -07004628 break;
Jiri Pirko57d80832015-10-01 11:03:41 +02004629 case SWITCHDEV_OBJ_ID_IPV4_FIB:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004630 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
Jiri Pirko76c6f942015-09-24 10:02:44 +02004631 err = rocker_port_fib_ipv4(rocker_port, trans,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004632 htonl(fib4->dst), fib4->dst_len,
Jiri Pirko850d0cb2015-10-14 19:40:51 +02004633 &fib4->fi, fib4->tb_id, 0);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004634 if (err)
4635 break;
4636 err = rocker_world_port_obj_fib4_add(rocker_port,
4637 SWITCHDEV_OBJ_IPV4_FIB(obj),
4638 trans);
Scott Feldman58c2cb12015-05-10 09:48:06 -07004639 break;
Jiri Pirko57d80832015-10-01 11:03:41 +02004640 case SWITCHDEV_OBJ_ID_PORT_FDB:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004641 err = rocker_port_fdb_add(rocker_port, trans,
4642 SWITCHDEV_OBJ_PORT_FDB(obj));
Jiri Pirkoe4201142016-02-16 15:14:45 +01004643 if (err)
4644 break;
4645 err = rocker_world_port_obj_fdb_add(rocker_port,
4646 SWITCHDEV_OBJ_PORT_FDB(obj),
4647 trans);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004648 break;
Scott Feldman9228ad22015-05-10 09:47:54 -07004649 default:
4650 err = -EOPNOTSUPP;
4651 break;
4652 }
4653
4654 return err;
4655}
4656
4657static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4658 u16 vid, u16 flags)
4659{
4660 int err;
4661
Jiri Pirko76c6f942015-09-24 10:02:44 +02004662 err = rocker_port_router_mac(rocker_port, NULL,
Scott Feldman9228ad22015-05-10 09:47:54 -07004663 ROCKER_OP_FLAG_REMOVE, htons(vid));
4664 if (err)
4665 return err;
4666
Jiri Pirko76c6f942015-09-24 10:02:44 +02004667 return rocker_port_vlan(rocker_port, NULL,
Scott Feldman9228ad22015-05-10 09:47:54 -07004668 ROCKER_OP_FLAG_REMOVE, vid);
4669}
4670
4671static int rocker_port_vlans_del(struct rocker_port *rocker_port,
Jiri Pirko8f24f302015-10-01 11:03:43 +02004672 const struct switchdev_obj_port_vlan *vlan)
Scott Feldman9228ad22015-05-10 09:47:54 -07004673{
4674 u16 vid;
4675 int err;
4676
Scott Feldman3e3a78b2015-06-22 00:27:16 -07004677 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
Scott Feldman9228ad22015-05-10 09:47:54 -07004678 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4679 if (err)
4680 return err;
4681 }
4682
4683 return 0;
4684}
4685
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004686static int rocker_port_fdb_del(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004687 struct switchdev_trans *trans,
Jiri Pirko52ba57c2015-10-01 11:03:44 +02004688 const struct switchdev_obj_port_fdb *fdb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004689{
4690 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
Jiri Pirkod33eeb62015-10-14 19:40:54 +02004691 int flags = ROCKER_OP_FLAG_REMOVE;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004692
4693 if (!rocker_port_is_bridged(rocker_port))
4694 return -EINVAL;
4695
Jiri Pirko76c6f942015-09-24 10:02:44 +02004696 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004697}
4698
Scott Feldman9228ad22015-05-10 09:47:54 -07004699static int rocker_port_obj_del(struct net_device *dev,
Jiri Pirko648b4a92015-10-01 11:03:45 +02004700 const struct switchdev_obj *obj)
Scott Feldman9228ad22015-05-10 09:47:54 -07004701{
4702 struct rocker_port *rocker_port = netdev_priv(dev);
Simon Hormane5054642015-05-25 14:28:36 +09004703 const struct switchdev_obj_ipv4_fib *fib4;
Scott Feldman9228ad22015-05-10 09:47:54 -07004704 int err = 0;
4705
Jiri Pirko9e8f4a52015-10-01 11:03:46 +02004706 switch (obj->id) {
Jiri Pirko57d80832015-10-01 11:03:41 +02004707 case SWITCHDEV_OBJ_ID_PORT_VLAN:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004708 err = rocker_port_vlans_del(rocker_port,
4709 SWITCHDEV_OBJ_PORT_VLAN(obj));
Jiri Pirkoe4201142016-02-16 15:14:45 +01004710 if (err)
4711 break;
4712 err = rocker_world_port_obj_vlan_del(rocker_port,
4713 SWITCHDEV_OBJ_PORT_VLAN(obj));
Scott Feldman9228ad22015-05-10 09:47:54 -07004714 break;
Jiri Pirko57d80832015-10-01 11:03:41 +02004715 case SWITCHDEV_OBJ_ID_IPV4_FIB:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004716 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
Jiri Pirko76c6f942015-09-24 10:02:44 +02004717 err = rocker_port_fib_ipv4(rocker_port, NULL,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004718 htonl(fib4->dst), fib4->dst_len,
Jiri Pirko850d0cb2015-10-14 19:40:51 +02004719 &fib4->fi, fib4->tb_id,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004720 ROCKER_OP_FLAG_REMOVE);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004721 if (err)
4722 break;
4723 err = rocker_world_port_obj_fib4_del(rocker_port,
4724 SWITCHDEV_OBJ_IPV4_FIB(obj));
Scott Feldman58c2cb12015-05-10 09:48:06 -07004725 break;
Jiri Pirko57d80832015-10-01 11:03:41 +02004726 case SWITCHDEV_OBJ_ID_PORT_FDB:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004727 err = rocker_port_fdb_del(rocker_port, NULL,
4728 SWITCHDEV_OBJ_PORT_FDB(obj));
Jiri Pirkoe4201142016-02-16 15:14:45 +01004729 if (err)
4730 break;
4731 err = rocker_world_port_obj_fdb_del(rocker_port,
4732 SWITCHDEV_OBJ_PORT_FDB(obj));
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004733 break;
4734 default:
4735 err = -EOPNOTSUPP;
4736 break;
4737 }
4738
4739 return err;
4740}
4741
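/* Dump walks the software FDB shadow under fdb_tbl_lock, invoking the
 * switchdev callback once per entry learned on this port. Entries are
 * reported as NUD_REACHABLE; ageing is handled separately by the
 * fdb_cleanup timer armed at probe time.
 */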
Simon Hormane5054642015-05-25 14:28:36 +09004742static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
Jiri Pirko52ba57c2015-10-01 11:03:44 +02004743 struct switchdev_obj_port_fdb *fdb,
Jiri Pirko648b4a92015-10-01 11:03:45 +02004744 switchdev_obj_dump_cb_t *cb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004745{
4746 struct rocker *rocker = rocker_port->rocker;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004747 struct rocker_fdb_tbl_entry *found;
4748 struct hlist_node *tmp;
4749 unsigned long lock_flags;
4750 int bkt;
4751 int err = 0;
4752
4753 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4754 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4c660492015-09-23 08:39:15 -07004755 if (found->key.rocker_port != rocker_port)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004756 continue;
Jiri Pirko850d0cb2015-10-14 19:40:51 +02004757 ether_addr_copy(fdb->addr, found->key.addr);
Vivien Didelotce80e7b2015-08-10 09:09:52 -04004758 fdb->ndm_state = NUD_REACHABLE;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004759 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4760 found->key.vlan_id);
Jiri Pirko648b4a92015-10-01 11:03:45 +02004761 err = cb(&fdb->obj);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004762 if (err)
4763 break;
4764 }
4765 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4766
4767 return err;
4768}
4769
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004770static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
Jiri Pirko8f24f302015-10-01 11:03:43 +02004771 struct switchdev_obj_port_vlan *vlan,
Jiri Pirko648b4a92015-10-01 11:03:45 +02004772 switchdev_obj_dump_cb_t *cb)
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004773{
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004774 u16 vid;
4775 int err = 0;
4776
4777 for (vid = 1; vid < VLAN_N_VID; vid++) {
4778 if (!test_bit(vid, rocker_port->vlan_bitmap))
4779 continue;
4780 vlan->flags = 0;
4781 if (rocker_vlan_id_is_internal(htons(vid)))
4782 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01004783 vlan->vid_begin = vid;
4784 vlan->vid_end = vid;
Jiri Pirko648b4a92015-10-01 11:03:45 +02004785 err = cb(&vlan->obj);
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004786 if (err)
4787 break;
4788 }
4789
4790 return err;
4791}
4792
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004793static int rocker_port_obj_dump(struct net_device *dev,
Jiri Pirko648b4a92015-10-01 11:03:45 +02004794 struct switchdev_obj *obj,
4795 switchdev_obj_dump_cb_t *cb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004796{
Simon Hormane5054642015-05-25 14:28:36 +09004797 const struct rocker_port *rocker_port = netdev_priv(dev);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004798 int err = 0;
4799
Jiri Pirko9e8f4a52015-10-01 11:03:46 +02004800 switch (obj->id) {
Jiri Pirko57d80832015-10-01 11:03:41 +02004801 case SWITCHDEV_OBJ_ID_PORT_FDB:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004802 err = rocker_port_fdb_dump(rocker_port,
4803 SWITCHDEV_OBJ_PORT_FDB(obj), cb);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004804 if (err)
4805 break;
4806 err = rocker_world_port_obj_fdb_dump(rocker_port,
4807 SWITCHDEV_OBJ_PORT_FDB(obj),
4808 cb);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004809 break;
Jiri Pirko57d80832015-10-01 11:03:41 +02004810 case SWITCHDEV_OBJ_ID_PORT_VLAN:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004811 err = rocker_port_vlan_dump(rocker_port,
4812 SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004813 if (err)
4814 break;
4815 err = rocker_world_port_obj_vlan_dump(rocker_port,
4816 SWITCHDEV_OBJ_PORT_VLAN(obj),
4817 cb);
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004818 break;
Scott Feldman9228ad22015-05-10 09:47:54 -07004819 default:
4820 err = -EOPNOTSUPP;
4821 break;
4822 }
4823
4824 return err;
4825}
4826
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07004827static const struct switchdev_ops rocker_port_switchdev_ops = {
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004828 .switchdev_port_attr_get = rocker_port_attr_get,
Scott Feldman35636062015-05-10 09:47:51 -07004829 .switchdev_port_attr_set = rocker_port_attr_set,
Scott Feldman9228ad22015-05-10 09:47:54 -07004830 .switchdev_port_obj_add = rocker_port_obj_add,
4831 .switchdev_port_obj_del = rocker_port_obj_del,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004832 .switchdev_port_obj_dump = rocker_port_obj_dump,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004833};
4834
4835/********************
4836 * ethtool interface
4837 ********************/
4838
4839static int rocker_port_get_settings(struct net_device *dev,
4840 struct ethtool_cmd *ecmd)
4841{
4842 struct rocker_port *rocker_port = netdev_priv(dev);
4843
4844 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4845}
4846
4847static int rocker_port_set_settings(struct net_device *dev,
4848 struct ethtool_cmd *ecmd)
4849{
4850 struct rocker_port *rocker_port = netdev_priv(dev);
4851
4852 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4853}
4854
4855static void rocker_port_get_drvinfo(struct net_device *dev,
4856 struct ethtool_drvinfo *drvinfo)
4857{
4858 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4859 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4860}
4861
David Ahern9766e972015-01-29 20:59:33 -07004862static struct rocker_port_stats {
4863 char str[ETH_GSTRING_LEN];
4864 int type;
4865} rocker_port_stats[] = {
4866 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4867 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4868 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4869 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4870
4871 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4872 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4873 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4874 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
4875};
4876
4877#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
4878
4879static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4880 u8 *data)
4881{
4882 u8 *p = data;
4883 int i;
4884
4885 switch (stringset) {
4886 case ETH_SS_STATS:
4887 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4888 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4889 p += ETH_GSTRING_LEN;
4890 }
4891 break;
4892 }
4893}
4894
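/* Port stats travel over the cmd ring like any other command: the
 * _prep callback builds a GET_PORT_STATS request keyed by pport, and
 * the _proc callback checks that the reply echoes the same pport
 * before copying counters out in rocker_port_stats[] order.
 */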
4895static int
Simon Horman534ba6a2015-06-01 13:25:04 +09004896rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
David Ahern9766e972015-01-29 20:59:33 -07004897 struct rocker_desc_info *desc_info,
4898 void *priv)
4899{
4900 struct rocker_tlv *cmd_stats;
4901
4902 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4903 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4904 return -EMSGSIZE;
4905
4906 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4907 if (!cmd_stats)
4908 return -EMSGSIZE;
4909
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004910 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4911 rocker_port->pport))
David Ahern9766e972015-01-29 20:59:33 -07004912 return -EMSGSIZE;
4913
4914 rocker_tlv_nest_end(desc_info, cmd_stats);
4915
4916 return 0;
4917}
4918
4919static int
Simon Horman534ba6a2015-06-01 13:25:04 +09004920rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09004921 const struct rocker_desc_info *desc_info,
David Ahern9766e972015-01-29 20:59:33 -07004922 void *priv)
4923{
Simon Hormane5054642015-05-25 14:28:36 +09004924 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4925 const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4926 const struct rocker_tlv *pattr;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004927 u32 pport;
David Ahern9766e972015-01-29 20:59:33 -07004928 u64 *data = priv;
4929 int i;
4930
4931 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4932
4933 if (!attrs[ROCKER_TLV_CMD_INFO])
4934 return -EIO;
4935
4936 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4937 attrs[ROCKER_TLV_CMD_INFO]);
4938
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004939 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
David Ahern9766e972015-01-29 20:59:33 -07004940 return -EIO;
4941
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004942 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4943 if (pport != rocker_port->pport)
David Ahern9766e972015-01-29 20:59:33 -07004944 return -EIO;
4945
4946 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4947 pattr = stats_attrs[rocker_port_stats[i].type];
4948 if (!pattr)
4949 continue;
4950
4951 data[i] = rocker_tlv_get_u64(pattr);
4952 }
4953
4954 return 0;
4955}
4956
4957static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4958 void *priv)
4959{
Jiri Pirko76c6f942015-09-24 10:02:44 +02004960 return rocker_cmd_exec(rocker_port, NULL, 0,
David Ahern9766e972015-01-29 20:59:33 -07004961 rocker_cmd_get_port_stats_prep, NULL,
4962 rocker_cmd_get_port_stats_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07004963 priv);
David Ahern9766e972015-01-29 20:59:33 -07004964}
4965
4966static void rocker_port_get_stats(struct net_device *dev,
4967 struct ethtool_stats *stats, u64 *data)
4968{
4969 struct rocker_port *rocker_port = netdev_priv(dev);
4970
4971 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4972 int i;
4973
4974 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4975 data[i] = 0;
4976 }
David Ahern9766e972015-01-29 20:59:33 -07004977}
4978
4979static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4980{
4981 switch (sset) {
4982 case ETH_SS_STATS:
4983 return ROCKER_PORT_STATS_LEN;
4984 default:
4985 return -EOPNOTSUPP;
4986 }
4987}
4988
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004989static const struct ethtool_ops rocker_port_ethtool_ops = {
4990 .get_settings = rocker_port_get_settings,
4991 .set_settings = rocker_port_set_settings,
4992 .get_drvinfo = rocker_port_get_drvinfo,
4993 .get_link = ethtool_op_get_link,
David Ahern9766e972015-01-29 20:59:33 -07004994 .get_strings = rocker_port_get_strings,
4995 .get_ethtool_stats = rocker_port_get_stats,
4996 .get_sset_count = rocker_port_get_sset_count,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004997};
4998
4999/*****************
5000 * NAPI interface
5001 *****************/
5002
5003static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
5004{
5005 return container_of(napi, struct rocker_port, napi_tx);
5006}
5007
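/* Tx NAPI is completion-only: reclaim finished descriptors, unmap
 * their fragments, fold the outcome into the netdev stats, and return
 * the consumed credits to the ring so the device can reuse the slots.
 * The queue is woken as soon as any credit is reclaimed.
 */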
5008static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
5009{
5010 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
Simon Hormane5054642015-05-25 14:28:36 +09005011 const struct rocker *rocker = rocker_port->rocker;
5012 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005013 u32 credits = 0;
5014 int err;
5015
5016 /* Cleanup tx descriptors */
5017 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
David Ahernf2bbca52015-01-16 14:22:29 -07005018 struct sk_buff *skb;
5019
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005020 err = rocker_desc_err(desc_info);
5021 if (err && net_ratelimit())
5022 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
5023 err);
5024 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
David Ahernf2bbca52015-01-16 14:22:29 -07005025
5026 skb = rocker_desc_cookie_ptr_get(desc_info);
5027 if (err == 0) {
5028 rocker_port->dev->stats.tx_packets++;
5029 rocker_port->dev->stats.tx_bytes += skb->len;
Scott Feldman4725ceb2015-05-10 09:48:08 -07005030 } else {
David Ahernf2bbca52015-01-16 14:22:29 -07005031 rocker_port->dev->stats.tx_errors++;
Scott Feldman4725ceb2015-05-10 09:48:08 -07005032 }
David Ahernf2bbca52015-01-16 14:22:29 -07005033
5034 dev_kfree_skb_any(skb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005035 credits++;
5036 }
5037
5038 if (credits && netif_queue_stopped(rocker_port->dev))
5039 netif_wake_queue(rocker_port->dev);
5040
5041 napi_complete(napi);
5042 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
5043
5044 return 0;
5045}
5046
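/* Rx descriptor processing: the TLVs carry the frag length and
 * optional flags. ROCKER_RX_FLAGS_FWD_OFFLOAD marks packets the
 * device already forwarded in hardware, so offload_fwd_mark is set to
 * tell the bridge layer not to forward them again in software.
 */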
Simon Hormane5054642015-05-25 14:28:36 +09005047static int rocker_port_rx_proc(const struct rocker *rocker,
5048 const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005049 struct rocker_desc_info *desc_info)
5050{
Simon Hormane5054642015-05-25 14:28:36 +09005051 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005052 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
5053 size_t rx_len;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005054 u16 rx_flags = 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005055
5056 if (!skb)
5057 return -ENOENT;
5058
5059 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
5060 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
5061 return -EINVAL;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005062 if (attrs[ROCKER_TLV_RX_FLAGS])
5063 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005064
5065 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
5066
5067 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
5068 skb_put(skb, rx_len);
5069 skb->protocol = eth_type_trans(skb, rocker_port->dev);
David Ahernf2bbca52015-01-16 14:22:29 -07005070
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005071 if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
5072 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
5073
David Ahernf2bbca52015-01-16 14:22:29 -07005074 rocker_port->dev->stats.rx_packets++;
5075 rocker_port->dev->stats.rx_bytes += skb->len;
5076
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005077 netif_receive_skb(skb);
5078
Simon Horman534ba6a2015-06-01 13:25:04 +09005079 return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005080}
5081
5082static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
5083{
5084 return container_of(napi, struct rocker_port, napi_rx);
5085}
5086
5087static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
5088{
5089 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
Simon Hormane5054642015-05-25 14:28:36 +09005090 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005091 struct rocker_desc_info *desc_info;
5092 u32 credits = 0;
5093 int err;
5094
5095 /* Process rx descriptors */
5096 while (credits < budget &&
5097 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
5098 err = rocker_desc_err(desc_info);
5099 if (err) {
5100 if (net_ratelimit())
5101 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
5102 err);
5103 } else {
5104 err = rocker_port_rx_proc(rocker, rocker_port,
5105 desc_info);
5106 if (err && net_ratelimit())
5107 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
5108 err);
5109 }
David Ahernf2bbca52015-01-16 14:22:29 -07005110 if (err)
5111 rocker_port->dev->stats.rx_errors++;
5112
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005113 rocker_desc_gen_clear(desc_info);
5114 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
5115 credits++;
5116 }
5117
5118 if (credits < budget)
5119 napi_complete(napi);
5120
5121 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
5122
5123 return credits;
5124}
5125
5126/*****************
5127 * PCI driver ops
5128 *****************/
5129
Simon Hormane5054642015-05-25 14:28:36 +09005130static void rocker_carrier_init(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005131{
Simon Hormane5054642015-05-25 14:28:36 +09005132 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005133 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
5134 bool link_up;
5135
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08005136 link_up = link_status & (1 << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005137 if (link_up)
5138 netif_carrier_on(rocker_port->dev);
5139 else
5140 netif_carrier_off(rocker_port->dev);
5141}
5142
Jiri Pirkoe4201142016-02-16 15:14:45 +01005143static void rocker_remove_ports(struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005144{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005145 struct rocker_port *rocker_port;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005146 int i;
5147
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005148 for (i = 0; i < rocker->port_count; i++) {
5149 rocker_port = rocker->ports[i];
Scott Feldmana0720312015-06-12 21:09:44 -07005150 if (!rocker_port)
5151 continue;
Jiri Pirko76c6f942015-09-24 10:02:44 +02005152 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
Jiri Pirkoe4201142016-02-16 15:14:45 +01005153 rocker_world_port_fini(rocker_port);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005154 unregister_netdev(rocker_port->dev);
Jiri Pirkoe4201142016-02-16 15:14:45 +01005155 rocker_world_port_post_fini(rocker_port);
Ido Schimmel1ebd47e2015-08-02 19:29:16 +02005156 free_netdev(rocker_port->dev);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005157 }
Jiri Pirkoe4201142016-02-16 15:14:45 +01005158 rocker_world_fini(rocker);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005159 kfree(rocker->ports);
5160}
5161
Simon Horman534ba6a2015-06-01 13:25:04 +09005162static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005163{
Simon Horman534ba6a2015-06-01 13:25:04 +09005164 const struct rocker *rocker = rocker_port->rocker;
Simon Hormane5054642015-05-25 14:28:36 +09005165 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005166 int err;
5167
5168 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
5169 rocker_port->dev->dev_addr);
5170 if (err) {
5171 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
5172 eth_hw_addr_random(rocker_port->dev);
5173 }
5174}
5175
5176static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
5177{
Simon Hormane5054642015-05-25 14:28:36 +09005178 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005179 struct rocker_port *rocker_port;
5180 struct net_device *dev;
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005181 u16 untagged_vid = 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005182 int err;
5183
5184 dev = alloc_etherdev(sizeof(struct rocker_port));
5185 if (!dev)
5186 return -ENOMEM;
5187 rocker_port = netdev_priv(dev);
5188 rocker_port->dev = dev;
5189 rocker_port->rocker = rocker;
5190 rocker_port->port_number = port_number;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08005191 rocker_port->pport = port_number + 1;
Scott Feldman5111f802014-11-28 14:34:30 +01005192 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
Scott Feldmane7335702015-09-23 08:39:17 -07005193 rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005194
Jiri Pirkoe4201142016-02-16 15:14:45 +01005195 err = rocker_world_check_init(rocker_port);
5196 if (err) {
5197 dev_err(&pdev->dev, "world init failed\n");
5198 goto err_world_check_init;
5199 }
5200
Simon Horman534ba6a2015-06-01 13:25:04 +09005201 rocker_port_dev_addr_init(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005202 dev->netdev_ops = &rocker_port_netdev_ops;
5203 dev->ethtool_ops = &rocker_port_ethtool_ops;
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07005204 dev->switchdev_ops = &rocker_port_switchdev_ops;
Eric Dumazetd64b5e82015-11-18 06:31:00 -08005205 netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01005206 NAPI_POLL_WEIGHT);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005207 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
5208 NAPI_POLL_WEIGHT);
5209 rocker_carrier_init(rocker_port);
5210
Ido Schimmel21518a62015-08-02 20:56:37 +02005211 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005212
Jiri Pirkoe4201142016-02-16 15:14:45 +01005213 err = rocker_world_port_pre_init(rocker_port);
5214 if (err) {
5215 dev_err(&pdev->dev, "port world pre-init failed\n");
5216 goto err_world_port_pre_init;
5217 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005218 err = register_netdev(dev);
5219 if (err) {
5220 dev_err(&pdev->dev, "register_netdev failed\n");
5221 goto err_register_netdev;
5222 }
5223 rocker->ports[port_number] = rocker_port;
5224
Jiri Pirkoe4201142016-02-16 15:14:45 +01005225 err = rocker_world_port_init(rocker_port);
5226 if (err) {
5227 dev_err(&pdev->dev, "port world init failed\n");
5228 goto err_world_port_init;
5229 }
5230
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005231 switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
5232
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01005233 rocker_port_set_learning(rocker_port, NULL,
5234 !!(rocker_port->brport_flags & BR_LEARNING));
Scott Feldman5111f802014-11-28 14:34:30 +01005235
Jiri Pirko76c6f942015-09-24 10:02:44 +02005236 err = rocker_port_ig_tbl(rocker_port, NULL, 0);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005237 if (err) {
Scott Feldmanff147022015-08-03 22:31:18 -07005238 netdev_err(rocker_port->dev, "install ig port table failed\n");
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005239 goto err_port_ig_tbl;
5240 }
5241
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005242 rocker_port->internal_vlan_id =
5243 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
5244
Jiri Pirko76c6f942015-09-24 10:02:44 +02005245 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005246 if (err) {
5247 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
5248 goto err_untagged_vlan;
5249 }
5250
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005251 return 0;
5252
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005253err_untagged_vlan:
Jiri Pirko76c6f942015-09-24 10:02:44 +02005254 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005255err_port_ig_tbl:
Jiri Pirkoe4201142016-02-16 15:14:45 +01005256 rocker_world_port_fini(rocker_port);
5257err_world_port_init:
Scott Feldman6c4f7782015-08-03 22:31:17 -07005258 rocker->ports[port_number] = NULL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005259 unregister_netdev(dev);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005260err_register_netdev:
Jiri Pirkoe4201142016-02-16 15:14:45 +01005261 rocker_world_port_post_fini(rocker_port);
5262err_world_port_pre_init:
5263err_world_check_init:
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005264 free_netdev(dev);
5265 return err;
5266}
5267
5268static int rocker_probe_ports(struct rocker *rocker)
5269{
5270 int i;
5271 size_t alloc_size;
5272 int err;
5273
5274 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
Scott Feldman27b808c2015-06-01 11:39:02 -07005275 rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
Dan Carpentere65ad3b2015-02-25 16:35:32 +03005276 if (!rocker->ports)
5277 return -ENOMEM;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005278 for (i = 0; i < rocker->port_count; i++) {
5279 err = rocker_probe_port(rocker, i);
5280 if (err)
5281 goto remove_ports;
5282 }
5283 return 0;
5284
5285remove_ports:
5286 rocker_remove_ports(rocker);
5287 return err;
5288}
5289
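/* The MSI-X layout is fixed by the device: one vector each for the
 * cmd and event rings plus a tx/rx pair per port, so exactly
 * ROCKER_MSIX_VEC_COUNT(port_count) vectors are expected; any other
 * count is treated as a broken device.
 */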
5290static int rocker_msix_init(struct rocker *rocker)
5291{
5292 struct pci_dev *pdev = rocker->pdev;
5293 int msix_entries;
5294 int i;
5295 int err;
5296
5297 msix_entries = pci_msix_vec_count(pdev);
5298 if (msix_entries < 0)
5299 return msix_entries;
5300
5301 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
5302 return -EINVAL;
5303
5304 rocker->msix_entries = kmalloc_array(msix_entries,
5305 sizeof(struct msix_entry),
5306 GFP_KERNEL);
5307 if (!rocker->msix_entries)
5308 return -ENOMEM;
5309
5310 for (i = 0; i < msix_entries; i++)
5311 rocker->msix_entries[i].entry = i;
5312
5313 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
5314 if (err < 0)
5315 goto err_enable_msix;
5316
5317 return 0;
5318
5319err_enable_msix:
5320 kfree(rocker->msix_entries);
5321 return err;
5322}
5323
Simon Hormane5054642015-05-25 14:28:36 +09005324static void rocker_msix_fini(const struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005325{
5326 pci_disable_msix(rocker->pdev);
5327 kfree(rocker->msix_entries);
5328}
5329
5330static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5331{
5332 struct rocker *rocker;
5333 int err;
5334
5335 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
5336 if (!rocker)
5337 return -ENOMEM;
5338
5339 err = pci_enable_device(pdev);
5340 if (err) {
5341 dev_err(&pdev->dev, "pci_enable_device failed\n");
5342 goto err_pci_enable_device;
5343 }
5344
5345 err = pci_request_regions(pdev, rocker_driver_name);
5346 if (err) {
5347 dev_err(&pdev->dev, "pci_request_regions failed\n");
5348 goto err_pci_request_regions;
5349 }
5350
5351 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5352 if (!err) {
5353 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5354 if (err) {
5355 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
5356 goto err_pci_set_dma_mask;
5357 }
5358 } else {
5359 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5360 if (err) {
5361 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
5362 goto err_pci_set_dma_mask;
5363 }
5364 }
5365
5366 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
5367 dev_err(&pdev->dev, "invalid PCI region size\n");
Wei Yongjun3122a922015-04-16 20:21:02 +08005368 err = -EINVAL;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005369 goto err_pci_resource_len_check;
5370 }
5371
5372 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
5373 pci_resource_len(pdev, 0));
5374 if (!rocker->hw_addr) {
5375 dev_err(&pdev->dev, "ioremap failed\n");
5376 err = -EIO;
5377 goto err_ioremap;
5378 }
5379 pci_set_master(pdev);
5380
5381 rocker->pdev = pdev;
5382 pci_set_drvdata(pdev, rocker);
5383
5384 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
5385
5386 err = rocker_msix_init(rocker);
5387 if (err) {
5388 dev_err(&pdev->dev, "MSI-X init failed\n");
5389 goto err_msix_init;
5390 }
5391
5392 err = rocker_basic_hw_test(rocker);
5393 if (err) {
5394 dev_err(&pdev->dev, "basic hw test failed\n");
5395 goto err_basic_hw_test;
5396 }
5397
5398 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5399
5400 err = rocker_dma_rings_init(rocker);
5401 if (err)
5402 goto err_dma_rings_init;
5403
5404 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
5405 rocker_cmd_irq_handler, 0,
5406 rocker_driver_name, rocker);
5407 if (err) {
5408 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5409 goto err_request_cmd_irq;
5410 }
5411
5412 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5413 rocker_event_irq_handler, 0,
5414 rocker_driver_name, rocker);
5415 if (err) {
5416 dev_err(&pdev->dev, "cannot assign event irq\n");
5417 goto err_request_event_irq;
5418 }
5419
5420 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5421
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005422 err = rocker_init_tbls(rocker);
5423 if (err) {
5424 dev_err(&pdev->dev, "cannot init rocker tables\n");
5425 goto err_init_tbls;
5426 }
5427
Scott Feldman52fe3e22015-09-23 08:39:18 -07005428 setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
5429 (unsigned long) rocker);
5430 mod_timer(&rocker->fdb_cleanup_timer, jiffies);
5431
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005432 err = rocker_probe_ports(rocker);
5433 if (err) {
5434 dev_err(&pdev->dev, "failed to probe ports\n");
5435 goto err_probe_ports;
5436 }
5437
Scott Feldmanc8beb5b2015-08-12 18:44:13 -07005438 dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5439 (int)sizeof(rocker->hw.id), &rocker->hw.id);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005440
5441 return 0;
5442
5443err_probe_ports:
Scott Feldman52fe3e22015-09-23 08:39:18 -07005444 del_timer_sync(&rocker->fdb_cleanup_timer);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005445 rocker_free_tbls(rocker);
5446err_init_tbls:
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005447 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5448err_request_event_irq:
5449 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5450err_request_cmd_irq:
5451 rocker_dma_rings_fini(rocker);
5452err_dma_rings_init:
5453err_basic_hw_test:
5454 rocker_msix_fini(rocker);
5455err_msix_init:
5456 iounmap(rocker->hw_addr);
5457err_ioremap:
5458err_pci_resource_len_check:
5459err_pci_set_dma_mask:
5460 pci_release_regions(pdev);
5461err_pci_request_regions:
5462 pci_disable_device(pdev);
5463err_pci_enable_device:
5464 kfree(rocker);
5465 return err;
5466}
5467
5468static void rocker_remove(struct pci_dev *pdev)
5469{
5470 struct rocker *rocker = pci_get_drvdata(pdev);
5471
Scott Feldman52fe3e22015-09-23 08:39:18 -07005472 del_timer_sync(&rocker->fdb_cleanup_timer);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005473 rocker_free_tbls(rocker);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005474 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5475 rocker_remove_ports(rocker);
5476 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5477 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5478 rocker_dma_rings_fini(rocker);
5479 rocker_msix_fini(rocker);
5480 iounmap(rocker->hw_addr);
5481 pci_release_regions(rocker->pdev);
5482 pci_disable_device(rocker->pdev);
5483 kfree(rocker);
5484}
5485
5486static struct pci_driver rocker_pci_driver = {
5487 .name = rocker_driver_name,
5488 .id_table = rocker_pci_id_table,
5489 .probe = rocker_probe,
5490 .remove = rocker_remove,
5491};
5492
Scott Feldman6c707942014-11-28 14:34:28 +01005493/************************************
5494 * Net device notifier event handler
5495 ************************************/
5496
Simon Hormane5054642015-05-25 14:28:36 +09005497static bool rocker_port_dev_check(const struct net_device *dev)
Scott Feldman6c707942014-11-28 14:34:28 +01005498{
5499 return dev->netdev_ops == &rocker_port_netdev_ops;
5500}
5501
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	u16 untagged_vid = 0;
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

	rocker_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);

	return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}

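/* Leaving the bridge is the mirror image of joining: drop the untagged
 * VLAN, switch back from the bridge's internal VLAN to the port's own,
 * clear the fwd mark, then re-add vid 0 and re-enable forwarding if the
 * port is up.
 */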
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	u16 untagged_vid = 0;
	int err;

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);

	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
				    false);
	rocker_port->bridge_dev = NULL;

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}

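/* For OVS masters there is no internal-VLAN rework as with bridges;
 * bouncing forwarding (disable, then enable) appears sufficient to
 * reprogram the port's flow entries for the new master state.
 */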
static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
				   struct net_device *master)
{
	int err;

	rocker_port->bridge_dev = master;

	err = rocker_port_fwd_disable(rocker_port, NULL, 0);
	if (err)
		return err;
	err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}

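/* Dispatch on master type: only bridge and Open vSwitch masters are
 * offloaded; any other upper device is ignored and reported as success.
 */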
static int rocker_port_master_linked(struct rocker_port *rocker_port,
				     struct net_device *master)
{
	int err = 0;

	if (netif_is_bridge_master(master))
		err = rocker_port_bridge_join(rocker_port, master);
	else if (netif_is_ovs_master(master))
		err = rocker_port_ovs_changed(rocker_port, master);
	return err;
}

static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
{
	int err = 0;

	if (rocker_port_is_bridged(rocker_port))
		err = rocker_port_bridge_leave(rocker_port);
	else if (rocker_port_is_ovsed(rocker_port))
		err = rocker_port_ovs_changed(rocker_port, NULL);
	return err;
}

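/* NETDEV_CHANGEUPPER fires when a rocker port is linked to or unlinked
 * from a master device, e.g. (port name illustrative):
 *
 *   ip link set sw1p1 master br0
 *
 * Both the newer "world" ops and the legacy OF-DPA path are notified;
 * failures are logged with netdev_warn() but do not veto the linking.
 */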
static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct rocker_port *rocker_port;
	int err;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;
		if (!info->master)
			goto out;
		rocker_port = netdev_priv(dev);
		if (info->linking) {
			err = rocker_world_port_master_linked(rocker_port,
							      info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
			err = rocker_port_master_linked(rocker_port,
							info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
		} else {
			err = rocker_world_port_master_unlinked(rocker_port,
								info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
			err = rocker_port_master_unlinked(rocker_port);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
		}
		break;
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};

/************************************
 * Net event notifier event handler
 ************************************/

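/* A neighbour entry in a NUD_VALID state refreshes the hardware ARP
 * entry; anything else removes it. ROCKER_OP_FLAG_NOWAIT is set because
 * netevent notifiers may run in atomic context, where the command ring
 * cannot sleep waiting for completion.
 */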
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
		    ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
}

static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct rocker_port *rocker_port;
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		rocker_port = netdev_priv(dev);
		err = rocker_world_port_neigh_update(rocker_port, n);
		if (err)
			netdev_warn(dev, "failed to handle neigh update (err %d)\n",
				    err);
		err = rocker_neigh_update(dev, n);
		if (err)
			netdev_warn(dev,
				    "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};

/***********************
 * Module init and exit
 ***********************/

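/* Register the notifiers before the PCI driver so that ports created
 * during probe never miss a netdev or netevent notification; unwind in
 * the opposite order on failure.
 */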
static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}

static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}

module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);