blob: 0015dcbf83e4591f50ebd20c7cf64b056535ff9c [file] [log] [blame]
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001/*
2 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01003 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/interrupt.h>
16#include <linux/sched.h>
17#include <linux/wait.h>
18#include <linux/spinlock.h>
Scott Feldman9f6bbf72014-11-28 14:34:27 +010019#include <linux/hashtable.h>
Jiri Pirko4b8ac962014-11-28 14:34:26 +010020#include <linux/crc32.h>
21#include <linux/sort.h>
22#include <linux/random.h>
23#include <linux/netdevice.h>
24#include <linux/inetdevice.h>
25#include <linux/skbuff.h>
26#include <linux/socket.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/if_ether.h>
30#include <linux/if_vlan.h>
Scott Feldman6c707942014-11-28 14:34:28 +010031#include <linux/if_bridge.h>
Scott Feldman9f6bbf72014-11-28 14:34:27 +010032#include <linux/bitops.h>
David Aherndb191702015-03-17 20:23:16 -060033#include <linux/ctype.h>
Jiri Pirko4b8ac962014-11-28 14:34:26 +010034#include <net/switchdev.h>
35#include <net/rtnetlink.h>
Scott Feldmanc1beeef2015-03-05 21:21:20 -080036#include <net/ip_fib.h>
37#include <net/netevent.h>
38#include <net/arp.h>
Christoph Hellwig2f8e2c82015-08-28 09:27:14 +020039#include <linux/io-64-nonatomic-lo-hi.h>
Jiri Pirko4b8ac962014-11-28 14:34:26 +010040#include <generated/utsrelease.h>
41
Jiri Pirko0fe685f2016-02-16 15:14:40 +010042#include "rocker_hw.h"
Jiri Pirkode152192016-02-16 15:14:42 +010043#include "rocker.h"
44#include "rocker_tlv.h"
Jiri Pirko4b8ac962014-11-28 14:34:26 +010045
46static const char rocker_driver_name[] = "rocker";
47
48static const struct pci_device_id rocker_pci_id_table[] = {
49 {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
50 {0, }
51};
52
Scott Feldman9f6bbf72014-11-28 14:34:27 +010053struct rocker_flow_tbl_key {
54 u32 priority;
55 enum rocker_of_dpa_table_id tbl_id;
56 union {
57 struct {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -080058 u32 in_pport;
59 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +010060 enum rocker_of_dpa_table_id goto_tbl;
61 } ig_port;
62 struct {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -080063 u32 in_pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +010064 __be16 vlan_id;
65 __be16 vlan_id_mask;
66 enum rocker_of_dpa_table_id goto_tbl;
67 bool untagged;
68 __be16 new_vlan_id;
69 } vlan;
70 struct {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -080071 u32 in_pport;
72 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +010073 __be16 eth_type;
74 u8 eth_dst[ETH_ALEN];
75 u8 eth_dst_mask[ETH_ALEN];
76 __be16 vlan_id;
77 __be16 vlan_id_mask;
78 enum rocker_of_dpa_table_id goto_tbl;
79 bool copy_to_cpu;
80 } term_mac;
81 struct {
82 __be16 eth_type;
83 __be32 dst4;
84 __be32 dst4_mask;
85 enum rocker_of_dpa_table_id goto_tbl;
86 u32 group_id;
87 } ucast_routing;
88 struct {
89 u8 eth_dst[ETH_ALEN];
90 u8 eth_dst_mask[ETH_ALEN];
91 int has_eth_dst;
92 int has_eth_dst_mask;
93 __be16 vlan_id;
94 u32 tunnel_id;
95 enum rocker_of_dpa_table_id goto_tbl;
96 u32 group_id;
97 bool copy_to_cpu;
98 } bridge;
99 struct {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -0800100 u32 in_pport;
101 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100102 u8 eth_src[ETH_ALEN];
103 u8 eth_src_mask[ETH_ALEN];
104 u8 eth_dst[ETH_ALEN];
105 u8 eth_dst_mask[ETH_ALEN];
106 __be16 eth_type;
107 __be16 vlan_id;
108 __be16 vlan_id_mask;
109 u8 ip_proto;
110 u8 ip_proto_mask;
111 u8 ip_tos;
112 u8 ip_tos_mask;
113 u32 group_id;
114 } acl;
115 };
116};
117
118struct rocker_flow_tbl_entry {
119 struct hlist_node entry;
Scott Feldmanc1beeef2015-03-05 21:21:20 -0800120 u32 cmd;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100121 u64 cookie;
122 struct rocker_flow_tbl_key key;
Scott Feldmanc1beeef2015-03-05 21:21:20 -0800123 size_t key_len;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100124 u32 key_crc32; /* key */
125};
126
127struct rocker_group_tbl_entry {
128 struct hlist_node entry;
129 u32 cmd;
130 u32 group_id; /* key */
131 u16 group_count;
132 u32 *group_ids;
133 union {
134 struct {
135 u8 pop_vlan;
136 } l2_interface;
137 struct {
138 u8 eth_src[ETH_ALEN];
139 u8 eth_dst[ETH_ALEN];
140 __be16 vlan_id;
141 u32 group_id;
142 } l2_rewrite;
143 struct {
144 u8 eth_src[ETH_ALEN];
145 u8 eth_dst[ETH_ALEN];
146 __be16 vlan_id;
147 bool ttl_check;
148 u32 group_id;
149 } l3_unicast;
150 };
151};
152
153struct rocker_fdb_tbl_entry {
154 struct hlist_node entry;
155 u32 key_crc32; /* key */
156 bool learned;
Scott Feldmana471be42015-09-23 08:39:14 -0700157 unsigned long touched;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100158 struct rocker_fdb_tbl_key {
Scott Feldman4c660492015-09-23 08:39:15 -0700159 struct rocker_port *rocker_port;
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100160 u8 addr[ETH_ALEN];
161 __be16 vlan_id;
162 } key;
163};
164
165struct rocker_internal_vlan_tbl_entry {
166 struct hlist_node entry;
167 int ifindex; /* key */
168 u32 ref_count;
169 __be16 vlan_id;
170};
171
Scott Feldmanc1beeef2015-03-05 21:21:20 -0800172struct rocker_neigh_tbl_entry {
173 struct hlist_node entry;
174 __be32 ip_addr; /* key */
175 struct net_device *dev;
176 u32 ref_count;
177 u32 index;
178 u8 eth_dst[ETH_ALEN];
179 bool ttl_check;
180};
181
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100182static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
183static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
184static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
185static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
186static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
187static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
188static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
189static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
190static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
191
192/* Rocker priority levels for flow table entries. Higher
193 * priority match takes precedence over lower priority match.
194 */
195
196enum {
197 ROCKER_PRIORITY_UNKNOWN = 0,
198 ROCKER_PRIORITY_IG_PORT = 1,
199 ROCKER_PRIORITY_VLAN = 1,
200 ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
201 ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100202 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
203 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
204 ROCKER_PRIORITY_BRIDGING_VLAN = 3,
205 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
206 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
207 ROCKER_PRIORITY_BRIDGING_TENANT = 3,
208 ROCKER_PRIORITY_ACL_CTRL = 3,
209 ROCKER_PRIORITY_ACL_NORMAL = 2,
210 ROCKER_PRIORITY_ACL_DFLT = 1,
211};
212
213static bool rocker_vlan_id_is_internal(__be16 vlan_id)
214{
215 u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
216 u16 end = 0xffe;
217 u16 _vlan_id = ntohs(vlan_id);
218
219 return (_vlan_id >= start && _vlan_id <= end);
220}
221
Simon Hormane5054642015-05-25 14:28:36 +0900222static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +0100223 u16 vid, bool *pop_vlan)
224{
225 __be16 vlan_id;
226
227 if (pop_vlan)
228 *pop_vlan = false;
229 vlan_id = htons(vid);
230 if (!vlan_id) {
231 vlan_id = rocker_port->internal_vlan_id;
232 if (pop_vlan)
233 *pop_vlan = true;
234 }
235
236 return vlan_id;
237}
238
Simon Hormane5054642015-05-25 14:28:36 +0900239static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
Scott Feldman6c707942014-11-28 14:34:28 +0100240 __be16 vlan_id)
241{
242 if (rocker_vlan_id_is_internal(vlan_id))
243 return 0;
244
245 return ntohs(vlan_id);
246}
247
Simon Hormane5054642015-05-25 14:28:36 +0900248static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
Scott Feldman6c707942014-11-28 14:34:28 +0100249{
Jiri Pirkofb4bf212015-08-27 09:31:22 +0200250 return rocker_port->bridge_dev &&
251 netif_is_bridge_master(rocker_port->bridge_dev);
Simon Horman82549732015-07-16 10:39:14 +0900252}
253
254static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
255{
Jiri Pirkofb4bf212015-08-27 09:31:22 +0200256 return rocker_port->bridge_dev &&
257 netif_is_ovs_master(rocker_port->bridge_dev);
Scott Feldman6c707942014-11-28 14:34:28 +0100258}
259
Scott Feldman179f9a22015-06-12 21:35:46 -0700260#define ROCKER_OP_FLAG_REMOVE BIT(0)
261#define ROCKER_OP_FLAG_NOWAIT BIT(1)
262#define ROCKER_OP_FLAG_LEARNED BIT(2)
263#define ROCKER_OP_FLAG_REFRESH BIT(3)
264
Jiri Pirkob15edf82016-02-16 15:14:39 +0100265static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags,
266 size_t size)
Scott Feldmanc4f20322015-05-10 09:47:50 -0700267{
Jiri Pirkoac3dbc62015-09-24 10:02:45 +0200268 struct switchdev_trans_item *elem = NULL;
Scott Feldman179f9a22015-06-12 21:35:46 -0700269 gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
270 GFP_ATOMIC : GFP_KERNEL;
Scott Feldmanc4f20322015-05-10 09:47:50 -0700271
272 /* If in transaction prepare phase, allocate the memory
Jiri Pirkoac3dbc62015-09-24 10:02:45 +0200273 * and enqueue it on a transaction. If in transaction
274 * commit phase, dequeue the memory from the transaction
Scott Feldmanc4f20322015-05-10 09:47:50 -0700275 * rather than re-allocating the memory. The idea is the
276 * driver code paths for prepare and commit are identical
277 * so the memory allocated in the prepare phase is the
278 * memory used in the commit phase.
279 */
280
Jiri Pirko76c6f942015-09-24 10:02:44 +0200281 if (!trans) {
282 elem = kzalloc(size + sizeof(*elem), gfp_flags);
Jiri Pirko76c6f942015-09-24 10:02:44 +0200283 } else if (switchdev_trans_ph_prepare(trans)) {
Scott Feldman179f9a22015-06-12 21:35:46 -0700284 elem = kzalloc(size + sizeof(*elem), gfp_flags);
Scott Feldmanc4f20322015-05-10 09:47:50 -0700285 if (!elem)
286 return NULL;
Jiri Pirkoac3dbc62015-09-24 10:02:45 +0200287 switchdev_trans_item_enqueue(trans, elem, kfree, elem);
Jiri Pirko76c6f942015-09-24 10:02:44 +0200288 } else {
Jiri Pirkoac3dbc62015-09-24 10:02:45 +0200289 elem = switchdev_trans_item_dequeue(trans);
Scott Feldmanc4f20322015-05-10 09:47:50 -0700290 }
291
292 return elem ? elem + 1 : NULL;
293}
294
Jiri Pirkob15edf82016-02-16 15:14:39 +0100295static void *rocker_kzalloc(struct switchdev_trans *trans, int flags,
296 size_t size)
Scott Feldmanc4f20322015-05-10 09:47:50 -0700297{
Jiri Pirkob15edf82016-02-16 15:14:39 +0100298 return __rocker_mem_alloc(trans, flags, size);
Scott Feldmanc4f20322015-05-10 09:47:50 -0700299}
300
Jiri Pirkob15edf82016-02-16 15:14:39 +0100301static void *rocker_kcalloc(struct switchdev_trans *trans, int flags,
302 size_t n, size_t size)
Scott Feldmanc4f20322015-05-10 09:47:50 -0700303{
Jiri Pirkob15edf82016-02-16 15:14:39 +0100304 return __rocker_mem_alloc(trans, flags, n * size);
Scott Feldmanc4f20322015-05-10 09:47:50 -0700305}
306
Jiri Pirkob15edf82016-02-16 15:14:39 +0100307static void rocker_kfree(struct switchdev_trans *trans, const void *mem)
Scott Feldmanc4f20322015-05-10 09:47:50 -0700308{
Jiri Pirkoac3dbc62015-09-24 10:02:45 +0200309 struct switchdev_trans_item *elem;
Scott Feldmanc4f20322015-05-10 09:47:50 -0700310
311 /* Frees are ignored if in transaction prepare phase. The
312 * memory remains on the per-port list until freed in the
313 * commit phase.
314 */
315
Jiri Pirko76c6f942015-09-24 10:02:44 +0200316 if (switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -0700317 return;
318
Jiri Pirkoac3dbc62015-09-24 10:02:45 +0200319 elem = (struct switchdev_trans_item *) mem - 1;
Scott Feldmanc4f20322015-05-10 09:47:50 -0700320 kfree(elem);
321}
322
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100323struct rocker_wait {
324 wait_queue_head_t wait;
325 bool done;
Scott Feldman179f9a22015-06-12 21:35:46 -0700326 bool nowait;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100327};
328
329static void rocker_wait_reset(struct rocker_wait *wait)
330{
331 wait->done = false;
Scott Feldman179f9a22015-06-12 21:35:46 -0700332 wait->nowait = false;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100333}
334
335static void rocker_wait_init(struct rocker_wait *wait)
336{
337 init_waitqueue_head(&wait->wait);
338 rocker_wait_reset(wait);
339}
340
Scott Feldmanc4f20322015-05-10 09:47:50 -0700341static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +0200342 struct switchdev_trans *trans,
Scott Feldman179f9a22015-06-12 21:35:46 -0700343 int flags)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100344{
345 struct rocker_wait *wait;
346
Jiri Pirkob15edf82016-02-16 15:14:39 +0100347 wait = rocker_kzalloc(trans, flags, sizeof(*wait));
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100348 if (!wait)
349 return NULL;
350 rocker_wait_init(wait);
351 return wait;
352}
353
Jiri Pirko76c6f942015-09-24 10:02:44 +0200354static void rocker_wait_destroy(struct switchdev_trans *trans,
Scott Feldmanc4f20322015-05-10 09:47:50 -0700355 struct rocker_wait *wait)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100356{
Jiri Pirkob15edf82016-02-16 15:14:39 +0100357 rocker_kfree(trans, wait);
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100358}
359
360static bool rocker_wait_event_timeout(struct rocker_wait *wait,
361 unsigned long timeout)
362{
363 wait_event_timeout(wait->wait, wait->done, HZ / 10);
364 if (!wait->done)
365 return false;
366 return true;
367}
368
369static void rocker_wait_wake_up(struct rocker_wait *wait)
370{
371 wait->done = true;
372 wake_up(&wait->wait);
373}
374
Simon Hormane5054642015-05-25 14:28:36 +0900375static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100376{
377 return rocker->msix_entries[vector].vector;
378}
379
Simon Hormane5054642015-05-25 14:28:36 +0900380static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100381{
382 return rocker_msix_vector(rocker_port->rocker,
383 ROCKER_MSIX_VEC_TX(rocker_port->port_number));
384}
385
Simon Hormane5054642015-05-25 14:28:36 +0900386static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100387{
388 return rocker_msix_vector(rocker_port->rocker,
389 ROCKER_MSIX_VEC_RX(rocker_port->port_number));
390}
391
392#define rocker_write32(rocker, reg, val) \
393 writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
394#define rocker_read32(rocker, reg) \
395 readl((rocker)->hw_addr + (ROCKER_ ## reg))
396#define rocker_write64(rocker, reg, val) \
397 writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
398#define rocker_read64(rocker, reg) \
399 readq((rocker)->hw_addr + (ROCKER_ ## reg))
400
401/*****************************
402 * HW basic testing functions
403 *****************************/
404
Simon Hormane5054642015-05-25 14:28:36 +0900405static int rocker_reg_test(const struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100406{
Simon Hormane5054642015-05-25 14:28:36 +0900407 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100408 u64 test_reg;
409 u64 rnd;
410
411 rnd = prandom_u32();
412 rnd >>= 1;
413 rocker_write32(rocker, TEST_REG, rnd);
414 test_reg = rocker_read32(rocker, TEST_REG);
415 if (test_reg != rnd * 2) {
416 dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
417 test_reg, rnd * 2);
418 return -EIO;
419 }
420
421 rnd = prandom_u32();
422 rnd <<= 31;
423 rnd |= prandom_u32();
424 rocker_write64(rocker, TEST_REG64, rnd);
425 test_reg = rocker_read64(rocker, TEST_REG64);
426 if (test_reg != rnd * 2) {
427 dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
428 test_reg, rnd * 2);
429 return -EIO;
430 }
431
432 return 0;
433}
434
Simon Hormane5054642015-05-25 14:28:36 +0900435static int rocker_dma_test_one(const struct rocker *rocker,
436 struct rocker_wait *wait, u32 test_type,
437 dma_addr_t dma_handle, const unsigned char *buf,
438 const unsigned char *expect, size_t size)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100439{
Simon Hormane5054642015-05-25 14:28:36 +0900440 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100441 int i;
442
443 rocker_wait_reset(wait);
444 rocker_write32(rocker, TEST_DMA_CTRL, test_type);
445
446 if (!rocker_wait_event_timeout(wait, HZ / 10)) {
447 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
448 return -EIO;
449 }
450
451 for (i = 0; i < size; i++) {
452 if (buf[i] != expect[i]) {
453 dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
454 buf[i], i, expect[i]);
455 return -EIO;
456 }
457 }
458 return 0;
459}
460
461#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
462#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
463
Simon Hormane5054642015-05-25 14:28:36 +0900464static int rocker_dma_test_offset(const struct rocker *rocker,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100465 struct rocker_wait *wait, int offset)
466{
467 struct pci_dev *pdev = rocker->pdev;
468 unsigned char *alloc;
469 unsigned char *buf;
470 unsigned char *expect;
471 dma_addr_t dma_handle;
472 int i;
473 int err;
474
475 alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
476 GFP_KERNEL | GFP_DMA);
477 if (!alloc)
478 return -ENOMEM;
479 buf = alloc + offset;
480 expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
481
482 dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
483 PCI_DMA_BIDIRECTIONAL);
484 if (pci_dma_mapping_error(pdev, dma_handle)) {
485 err = -EIO;
486 goto free_alloc;
487 }
488
489 rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
490 rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
491
492 memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
493 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
494 dma_handle, buf, expect,
495 ROCKER_TEST_DMA_BUF_SIZE);
496 if (err)
497 goto unmap;
498
499 memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
500 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
501 dma_handle, buf, expect,
502 ROCKER_TEST_DMA_BUF_SIZE);
503 if (err)
504 goto unmap;
505
506 prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
507 for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
508 expect[i] = ~buf[i];
509 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
510 dma_handle, buf, expect,
511 ROCKER_TEST_DMA_BUF_SIZE);
512 if (err)
513 goto unmap;
514
515unmap:
516 pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
517 PCI_DMA_BIDIRECTIONAL);
518free_alloc:
519 kfree(alloc);
520
521 return err;
522}
523
Simon Hormane5054642015-05-25 14:28:36 +0900524static int rocker_dma_test(const struct rocker *rocker,
525 struct rocker_wait *wait)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100526{
527 int i;
528 int err;
529
530 for (i = 0; i < 8; i++) {
531 err = rocker_dma_test_offset(rocker, wait, i);
532 if (err)
533 return err;
534 }
535 return 0;
536}
537
538static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
539{
540 struct rocker_wait *wait = dev_id;
541
542 rocker_wait_wake_up(wait);
543
544 return IRQ_HANDLED;
545}
546
Simon Hormane5054642015-05-25 14:28:36 +0900547static int rocker_basic_hw_test(const struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100548{
Simon Hormane5054642015-05-25 14:28:36 +0900549 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100550 struct rocker_wait wait;
551 int err;
552
553 err = rocker_reg_test(rocker);
554 if (err) {
555 dev_err(&pdev->dev, "reg test failed\n");
556 return err;
557 }
558
559 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
560 rocker_test_irq_handler, 0,
561 rocker_driver_name, &wait);
562 if (err) {
563 dev_err(&pdev->dev, "cannot assign test irq\n");
564 return err;
565 }
566
567 rocker_wait_init(&wait);
568 rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
569
570 if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
571 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
572 err = -EIO;
573 goto free_irq;
574 }
575
576 err = rocker_dma_test(rocker, &wait);
577 if (err)
578 dev_err(&pdev->dev, "dma test failed\n");
579
580free_irq:
581 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
582 return err;
583}
584
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100585/******************************************
586 * DMA rings and descriptors manipulations
587 ******************************************/
588
589static u32 __pos_inc(u32 pos, size_t limit)
590{
591 return ++pos == limit ? 0 : pos;
592}
593
Simon Hormane5054642015-05-25 14:28:36 +0900594static int rocker_desc_err(const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100595{
Scott Feldman7eb344f2015-02-25 20:15:36 -0800596 int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
597
598 switch (err) {
599 case ROCKER_OK:
600 return 0;
601 case -ROCKER_ENOENT:
602 return -ENOENT;
603 case -ROCKER_ENXIO:
604 return -ENXIO;
605 case -ROCKER_ENOMEM:
606 return -ENOMEM;
607 case -ROCKER_EEXIST:
608 return -EEXIST;
609 case -ROCKER_EINVAL:
610 return -EINVAL;
611 case -ROCKER_EMSGSIZE:
612 return -EMSGSIZE;
613 case -ROCKER_ENOTSUP:
614 return -EOPNOTSUPP;
615 case -ROCKER_ENOBUFS:
616 return -ENOBUFS;
617 }
618
619 return -EINVAL;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100620}
621
Simon Hormane5054642015-05-25 14:28:36 +0900622static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100623{
624 desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
625}
626
Simon Hormane5054642015-05-25 14:28:36 +0900627static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100628{
629 u32 comp_err = desc_info->desc->comp_err;
630
631 return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
632}
633
Jiri Pirko11ce2ba2016-02-16 15:14:41 +0100634static void *
635rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100636{
Arnd Bergmannadedf372015-01-13 15:23:52 +0100637 return (void *)(uintptr_t)desc_info->desc->cookie;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100638}
639
Simon Hormane5054642015-05-25 14:28:36 +0900640static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100641 void *ptr)
642{
Arnd Bergmannadedf372015-01-13 15:23:52 +0100643 desc_info->desc->cookie = (uintptr_t) ptr;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100644}
645
646static struct rocker_desc_info *
Simon Hormane5054642015-05-25 14:28:36 +0900647rocker_desc_head_get(const struct rocker_dma_ring_info *info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100648{
649 static struct rocker_desc_info *desc_info;
650 u32 head = __pos_inc(info->head, info->size);
651
652 desc_info = &info->desc_info[info->head];
653 if (head == info->tail)
654 return NULL; /* ring full */
655 desc_info->tlv_size = 0;
656 return desc_info;
657}
658
Simon Hormane5054642015-05-25 14:28:36 +0900659static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100660{
661 desc_info->desc->buf_size = desc_info->data_size;
662 desc_info->desc->tlv_size = desc_info->tlv_size;
663}
664
Simon Hormane5054642015-05-25 14:28:36 +0900665static void rocker_desc_head_set(const struct rocker *rocker,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100666 struct rocker_dma_ring_info *info,
Simon Hormane5054642015-05-25 14:28:36 +0900667 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100668{
669 u32 head = __pos_inc(info->head, info->size);
670
671 BUG_ON(head == info->tail);
672 rocker_desc_commit(desc_info);
673 info->head = head;
674 rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
675}
676
677static struct rocker_desc_info *
678rocker_desc_tail_get(struct rocker_dma_ring_info *info)
679{
680 static struct rocker_desc_info *desc_info;
681
682 if (info->tail == info->head)
683 return NULL; /* nothing to be done between head and tail */
684 desc_info = &info->desc_info[info->tail];
685 if (!rocker_desc_gen(desc_info))
686 return NULL; /* gen bit not set, desc is not ready yet */
687 info->tail = __pos_inc(info->tail, info->size);
688 desc_info->tlv_size = desc_info->desc->tlv_size;
689 return desc_info;
690}
691
Simon Hormane5054642015-05-25 14:28:36 +0900692static void rocker_dma_ring_credits_set(const struct rocker *rocker,
693 const struct rocker_dma_ring_info *info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100694 u32 credits)
695{
696 if (credits)
697 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
698}
699
700static unsigned long rocker_dma_ring_size_fix(size_t size)
701{
702 return max(ROCKER_DMA_SIZE_MIN,
703 min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
704}
705
Simon Hormane5054642015-05-25 14:28:36 +0900706static int rocker_dma_ring_create(const struct rocker *rocker,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100707 unsigned int type,
708 size_t size,
709 struct rocker_dma_ring_info *info)
710{
711 int i;
712
713 BUG_ON(size != rocker_dma_ring_size_fix(size));
714 info->size = size;
715 info->type = type;
716 info->head = 0;
717 info->tail = 0;
718 info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
719 GFP_KERNEL);
720 if (!info->desc_info)
721 return -ENOMEM;
722
723 info->desc = pci_alloc_consistent(rocker->pdev,
724 info->size * sizeof(*info->desc),
725 &info->mapaddr);
726 if (!info->desc) {
727 kfree(info->desc_info);
728 return -ENOMEM;
729 }
730
731 for (i = 0; i < info->size; i++)
732 info->desc_info[i].desc = &info->desc[i];
733
734 rocker_write32(rocker, DMA_DESC_CTRL(info->type),
735 ROCKER_DMA_DESC_CTRL_RESET);
736 rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
737 rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
738
739 return 0;
740}
741
Simon Hormane5054642015-05-25 14:28:36 +0900742static void rocker_dma_ring_destroy(const struct rocker *rocker,
743 const struct rocker_dma_ring_info *info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100744{
745 rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
746
747 pci_free_consistent(rocker->pdev,
748 info->size * sizeof(struct rocker_desc),
749 info->desc, info->mapaddr);
750 kfree(info->desc_info);
751}
752
Simon Hormane5054642015-05-25 14:28:36 +0900753static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100754 struct rocker_dma_ring_info *info)
755{
756 int i;
757
758 BUG_ON(info->head || info->tail);
759
760 /* When ring is consumer, we need to advance head for each desc.
761 * That tells hw that the desc is ready to be used by it.
762 */
763 for (i = 0; i < info->size - 1; i++)
764 rocker_desc_head_set(rocker, info, &info->desc_info[i]);
765 rocker_desc_commit(&info->desc_info[i]);
766}
767
Simon Hormane5054642015-05-25 14:28:36 +0900768static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
769 const struct rocker_dma_ring_info *info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100770 int direction, size_t buf_size)
771{
772 struct pci_dev *pdev = rocker->pdev;
773 int i;
774 int err;
775
776 for (i = 0; i < info->size; i++) {
777 struct rocker_desc_info *desc_info = &info->desc_info[i];
778 struct rocker_desc *desc = &info->desc[i];
779 dma_addr_t dma_handle;
780 char *buf;
781
782 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
783 if (!buf) {
784 err = -ENOMEM;
785 goto rollback;
786 }
787
788 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
789 if (pci_dma_mapping_error(pdev, dma_handle)) {
790 kfree(buf);
791 err = -EIO;
792 goto rollback;
793 }
794
795 desc_info->data = buf;
796 desc_info->data_size = buf_size;
797 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
798
799 desc->buf_addr = dma_handle;
800 desc->buf_size = buf_size;
801 }
802 return 0;
803
804rollback:
805 for (i--; i >= 0; i--) {
Simon Hormane5054642015-05-25 14:28:36 +0900806 const struct rocker_desc_info *desc_info = &info->desc_info[i];
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100807
808 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
809 desc_info->data_size, direction);
810 kfree(desc_info->data);
811 }
812 return err;
813}
814
Simon Hormane5054642015-05-25 14:28:36 +0900815static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
816 const struct rocker_dma_ring_info *info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100817 int direction)
818{
819 struct pci_dev *pdev = rocker->pdev;
820 int i;
821
822 for (i = 0; i < info->size; i++) {
Simon Hormane5054642015-05-25 14:28:36 +0900823 const struct rocker_desc_info *desc_info = &info->desc_info[i];
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100824 struct rocker_desc *desc = &info->desc[i];
825
826 desc->buf_addr = 0;
827 desc->buf_size = 0;
828 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
829 desc_info->data_size, direction);
830 kfree(desc_info->data);
831 }
832}
833
834static int rocker_dma_rings_init(struct rocker *rocker)
835{
Simon Hormane5054642015-05-25 14:28:36 +0900836 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100837 int err;
838
839 err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
840 ROCKER_DMA_CMD_DEFAULT_SIZE,
841 &rocker->cmd_ring);
842 if (err) {
843 dev_err(&pdev->dev, "failed to create command dma ring\n");
844 return err;
845 }
846
847 spin_lock_init(&rocker->cmd_ring_lock);
848
849 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
850 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
851 if (err) {
852 dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
853 goto err_dma_cmd_ring_bufs_alloc;
854 }
855
856 err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
857 ROCKER_DMA_EVENT_DEFAULT_SIZE,
858 &rocker->event_ring);
859 if (err) {
860 dev_err(&pdev->dev, "failed to create event dma ring\n");
861 goto err_dma_event_ring_create;
862 }
863
864 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
865 PCI_DMA_FROMDEVICE, PAGE_SIZE);
866 if (err) {
867 dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
868 goto err_dma_event_ring_bufs_alloc;
869 }
870 rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
871 return 0;
872
873err_dma_event_ring_bufs_alloc:
874 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
875err_dma_event_ring_create:
876 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
877 PCI_DMA_BIDIRECTIONAL);
878err_dma_cmd_ring_bufs_alloc:
879 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
880 return err;
881}
882
883static void rocker_dma_rings_fini(struct rocker *rocker)
884{
885 rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
886 PCI_DMA_BIDIRECTIONAL);
887 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
888 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
889 PCI_DMA_BIDIRECTIONAL);
890 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
891}
892
Simon Horman534ba6a2015-06-01 13:25:04 +0900893static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100894 struct rocker_desc_info *desc_info,
895 struct sk_buff *skb, size_t buf_len)
896{
Simon Horman534ba6a2015-06-01 13:25:04 +0900897 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100898 struct pci_dev *pdev = rocker->pdev;
899 dma_addr_t dma_handle;
900
901 dma_handle = pci_map_single(pdev, skb->data, buf_len,
902 PCI_DMA_FROMDEVICE);
903 if (pci_dma_mapping_error(pdev, dma_handle))
904 return -EIO;
905 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
906 goto tlv_put_failure;
907 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
908 goto tlv_put_failure;
909 return 0;
910
911tlv_put_failure:
912 pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
913 desc_info->tlv_size = 0;
914 return -EMSGSIZE;
915}
916
Simon Hormane5054642015-05-25 14:28:36 +0900917static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100918{
919 return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
920}
921
Simon Horman534ba6a2015-06-01 13:25:04 +0900922static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100923 struct rocker_desc_info *desc_info)
924{
925 struct net_device *dev = rocker_port->dev;
926 struct sk_buff *skb;
927 size_t buf_len = rocker_port_rx_buf_len(rocker_port);
928 int err;
929
930 /* Ensure that hw will see tlv_size zero in case of an error.
931 * That tells hw to use another descriptor.
932 */
933 rocker_desc_cookie_ptr_set(desc_info, NULL);
934 desc_info->tlv_size = 0;
935
936 skb = netdev_alloc_skb_ip_align(dev, buf_len);
937 if (!skb)
938 return -ENOMEM;
Simon Horman534ba6a2015-06-01 13:25:04 +0900939 err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100940 if (err) {
941 dev_kfree_skb_any(skb);
942 return err;
943 }
944 rocker_desc_cookie_ptr_set(desc_info, skb);
945 return 0;
946}
947
Simon Hormane5054642015-05-25 14:28:36 +0900948static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
949 const struct rocker_tlv **attrs)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100950{
951 struct pci_dev *pdev = rocker->pdev;
952 dma_addr_t dma_handle;
953 size_t len;
954
955 if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
956 !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
957 return;
958 dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
959 len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
960 pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
961}
962
Simon Hormane5054642015-05-25 14:28:36 +0900963static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
964 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100965{
Simon Hormane5054642015-05-25 14:28:36 +0900966 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100967 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
968
969 if (!skb)
970 return;
971 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
972 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
973 dev_kfree_skb_any(skb);
974}
975
Simon Horman534ba6a2015-06-01 13:25:04 +0900976static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100977{
Simon Hormane5054642015-05-25 14:28:36 +0900978 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
Simon Horman534ba6a2015-06-01 13:25:04 +0900979 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100980 int i;
981 int err;
982
983 for (i = 0; i < rx_ring->size; i++) {
Simon Horman534ba6a2015-06-01 13:25:04 +0900984 err = rocker_dma_rx_ring_skb_alloc(rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100985 &rx_ring->desc_info[i]);
986 if (err)
987 goto rollback;
988 }
989 return 0;
990
991rollback:
992 for (i--; i >= 0; i--)
993 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
994 return err;
995}
996
Simon Horman534ba6a2015-06-01 13:25:04 +0900997static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +0100998{
Simon Hormane5054642015-05-25 14:28:36 +0900999 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
Simon Horman534ba6a2015-06-01 13:25:04 +09001000 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001001 int i;
1002
1003 for (i = 0; i < rx_ring->size; i++)
1004 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1005}
1006
1007static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1008{
1009 struct rocker *rocker = rocker_port->rocker;
1010 int err;
1011
1012 err = rocker_dma_ring_create(rocker,
1013 ROCKER_DMA_TX(rocker_port->port_number),
1014 ROCKER_DMA_TX_DEFAULT_SIZE,
1015 &rocker_port->tx_ring);
1016 if (err) {
1017 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1018 return err;
1019 }
1020
1021 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1022 PCI_DMA_TODEVICE,
1023 ROCKER_DMA_TX_DESC_SIZE);
1024 if (err) {
1025 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1026 goto err_dma_tx_ring_bufs_alloc;
1027 }
1028
1029 err = rocker_dma_ring_create(rocker,
1030 ROCKER_DMA_RX(rocker_port->port_number),
1031 ROCKER_DMA_RX_DEFAULT_SIZE,
1032 &rocker_port->rx_ring);
1033 if (err) {
1034 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1035 goto err_dma_rx_ring_create;
1036 }
1037
1038 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1039 PCI_DMA_BIDIRECTIONAL,
1040 ROCKER_DMA_RX_DESC_SIZE);
1041 if (err) {
1042 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1043 goto err_dma_rx_ring_bufs_alloc;
1044 }
1045
Simon Horman534ba6a2015-06-01 13:25:04 +09001046 err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001047 if (err) {
1048 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1049 goto err_dma_rx_ring_skbs_alloc;
1050 }
1051 rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1052
1053 return 0;
1054
1055err_dma_rx_ring_skbs_alloc:
1056 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1057 PCI_DMA_BIDIRECTIONAL);
1058err_dma_rx_ring_bufs_alloc:
1059 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1060err_dma_rx_ring_create:
1061 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1062 PCI_DMA_TODEVICE);
1063err_dma_tx_ring_bufs_alloc:
1064 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1065 return err;
1066}
1067
1068static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1069{
1070 struct rocker *rocker = rocker_port->rocker;
1071
Simon Horman534ba6a2015-06-01 13:25:04 +09001072 rocker_dma_rx_ring_skbs_free(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001073 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1074 PCI_DMA_BIDIRECTIONAL);
1075 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1076 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1077 PCI_DMA_TODEVICE);
1078 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1079}
1080
Simon Hormane5054642015-05-25 14:28:36 +09001081static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1082 bool enable)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001083{
1084 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1085
1086 if (enable)
David S. Miller71a83a62015-03-03 21:16:48 -05001087 val |= 1ULL << rocker_port->pport;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001088 else
David S. Miller71a83a62015-03-03 21:16:48 -05001089 val &= ~(1ULL << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001090 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1091}
1092
1093/********************************
1094 * Interrupt handler and helpers
1095 ********************************/
1096
1097static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1098{
1099 struct rocker *rocker = dev_id;
Simon Hormane5054642015-05-25 14:28:36 +09001100 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001101 struct rocker_wait *wait;
1102 u32 credits = 0;
1103
1104 spin_lock(&rocker->cmd_ring_lock);
1105 while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1106 wait = rocker_desc_cookie_ptr_get(desc_info);
Scott Feldman179f9a22015-06-12 21:35:46 -07001107 if (wait->nowait) {
1108 rocker_desc_gen_clear(desc_info);
Jiri Pirko76c6f942015-09-24 10:02:44 +02001109 rocker_wait_destroy(NULL, wait);
Scott Feldman179f9a22015-06-12 21:35:46 -07001110 } else {
1111 rocker_wait_wake_up(wait);
1112 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001113 credits++;
1114 }
1115 spin_unlock(&rocker->cmd_ring_lock);
1116 rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1117
1118 return IRQ_HANDLED;
1119}
1120
Simon Hormane5054642015-05-25 14:28:36 +09001121static void rocker_port_link_up(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001122{
1123 netif_carrier_on(rocker_port->dev);
1124 netdev_info(rocker_port->dev, "Link is up\n");
1125}
1126
Simon Hormane5054642015-05-25 14:28:36 +09001127static void rocker_port_link_down(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001128{
1129 netif_carrier_off(rocker_port->dev);
1130 netdev_info(rocker_port->dev, "Link is down\n");
1131}
1132
Simon Hormane5054642015-05-25 14:28:36 +09001133static int rocker_event_link_change(const struct rocker *rocker,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001134 const struct rocker_tlv *info)
1135{
Simon Hormane5054642015-05-25 14:28:36 +09001136 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001137 unsigned int port_number;
1138 bool link_up;
1139 struct rocker_port *rocker_port;
1140
1141 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001142 if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001143 !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1144 return -EIO;
1145 port_number =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001146 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001147 link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1148
1149 if (port_number >= rocker->port_count)
1150 return -EINVAL;
1151
1152 rocker_port = rocker->ports[port_number];
1153 if (netif_carrier_ok(rocker_port->dev) != link_up) {
1154 if (link_up)
1155 rocker_port_link_up(rocker_port);
1156 else
1157 rocker_port_link_down(rocker_port);
1158 }
1159
1160 return 0;
1161}
1162
Scott Feldman6c707942014-11-28 14:34:28 +01001163static int rocker_port_fdb(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02001164 struct switchdev_trans *trans,
Scott Feldman6c707942014-11-28 14:34:28 +01001165 const unsigned char *addr,
1166 __be16 vlan_id, int flags);
Jiri Pirkoe4201142016-02-16 15:14:45 +01001167static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
1168 const unsigned char *addr,
1169 __be16 vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01001170
Simon Hormane5054642015-05-25 14:28:36 +09001171static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
Scott Feldman6c707942014-11-28 14:34:28 +01001172 const struct rocker_tlv *info)
1173{
Simon Hormane5054642015-05-25 14:28:36 +09001174 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
Scott Feldman6c707942014-11-28 14:34:28 +01001175 unsigned int port_number;
1176 struct rocker_port *rocker_port;
Simon Hormane5054642015-05-25 14:28:36 +09001177 const unsigned char *addr;
Scott Feldman92014b92015-06-12 21:35:49 -07001178 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
Scott Feldman6c707942014-11-28 14:34:28 +01001179 __be16 vlan_id;
Jiri Pirkoe4201142016-02-16 15:14:45 +01001180 int err;
Scott Feldman6c707942014-11-28 14:34:28 +01001181
1182 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001183 if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
Scott Feldman6c707942014-11-28 14:34:28 +01001184 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1185 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1186 return -EIO;
1187 port_number =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001188 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
Scott Feldman6c707942014-11-28 14:34:28 +01001189 addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
Jiri Pirko9b03c712014-12-03 14:14:53 +01001190 vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
Scott Feldman6c707942014-11-28 14:34:28 +01001191
1192 if (port_number >= rocker->port_count)
1193 return -EINVAL;
1194
1195 rocker_port = rocker->ports[port_number];
1196
Jiri Pirkoe4201142016-02-16 15:14:45 +01001197 err = rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
1198 if (err)
1199 return err;
1200
Scott Feldman6c707942014-11-28 14:34:28 +01001201 if (rocker_port->stp_state != BR_STATE_LEARNING &&
1202 rocker_port->stp_state != BR_STATE_FORWARDING)
1203 return 0;
1204
Jiri Pirko76c6f942015-09-24 10:02:44 +02001205 return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
Scott Feldman6c707942014-11-28 14:34:28 +01001206}
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001207
Simon Hormane5054642015-05-25 14:28:36 +09001208static int rocker_event_process(const struct rocker *rocker,
1209 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001210{
Simon Hormane5054642015-05-25 14:28:36 +09001211 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1212 const struct rocker_tlv *info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001213 u16 type;
1214
1215 rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1216 if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1217 !attrs[ROCKER_TLV_EVENT_INFO])
1218 return -EIO;
1219
1220 type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1221 info = attrs[ROCKER_TLV_EVENT_INFO];
1222
1223 switch (type) {
1224 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1225 return rocker_event_link_change(rocker, info);
Scott Feldman6c707942014-11-28 14:34:28 +01001226 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1227 return rocker_event_mac_vlan_seen(rocker, info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001228 }
1229
1230 return -EOPNOTSUPP;
1231}
1232
1233static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1234{
1235 struct rocker *rocker = dev_id;
Simon Hormane5054642015-05-25 14:28:36 +09001236 const struct pci_dev *pdev = rocker->pdev;
1237 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001238 u32 credits = 0;
1239 int err;
1240
1241 while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1242 err = rocker_desc_err(desc_info);
1243 if (err) {
1244 dev_err(&pdev->dev, "event desc received with err %d\n",
1245 err);
1246 } else {
1247 err = rocker_event_process(rocker, desc_info);
1248 if (err)
1249 dev_err(&pdev->dev, "event processing failed with err %d\n",
1250 err);
1251 }
1252 rocker_desc_gen_clear(desc_info);
1253 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1254 credits++;
1255 }
1256 rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1257
1258 return IRQ_HANDLED;
1259}
1260
1261static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1262{
1263 struct rocker_port *rocker_port = dev_id;
1264
1265 napi_schedule(&rocker_port->napi_tx);
1266 return IRQ_HANDLED;
1267}
1268
1269static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1270{
1271 struct rocker_port *rocker_port = dev_id;
1272
1273 napi_schedule(&rocker_port->napi_rx);
1274 return IRQ_HANDLED;
1275}
1276
1277/********************
1278 * Command interface
1279 ********************/
1280
Simon Horman534ba6a2015-06-01 13:25:04 +09001281typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001282 struct rocker_desc_info *desc_info,
1283 void *priv);
1284
Simon Horman534ba6a2015-06-01 13:25:04 +09001285typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001286 const struct rocker_desc_info *desc_info,
1287 void *priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001288
Simon Horman534ba6a2015-06-01 13:25:04 +09001289static int rocker_cmd_exec(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02001290 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09001291 rocker_cmd_prep_cb_t prepare, void *prepare_priv,
1292 rocker_cmd_proc_cb_t process, void *process_priv)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001293{
Simon Horman534ba6a2015-06-01 13:25:04 +09001294 struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001295 struct rocker_desc_info *desc_info;
1296 struct rocker_wait *wait;
Scott Feldman179f9a22015-06-12 21:35:46 -07001297 bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
1298 unsigned long lock_flags;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001299 int err;
1300
Jiri Pirko76c6f942015-09-24 10:02:44 +02001301 wait = rocker_wait_create(rocker_port, trans, flags);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001302 if (!wait)
1303 return -ENOMEM;
Scott Feldman179f9a22015-06-12 21:35:46 -07001304 wait->nowait = nowait;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001305
Scott Feldman179f9a22015-06-12 21:35:46 -07001306 spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
Scott Feldmanc4f20322015-05-10 09:47:50 -07001307
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001308 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1309 if (!desc_info) {
Scott Feldman179f9a22015-06-12 21:35:46 -07001310 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001311 err = -EAGAIN;
1312 goto out;
1313 }
Scott Feldmanc4f20322015-05-10 09:47:50 -07001314
Simon Horman534ba6a2015-06-01 13:25:04 +09001315 err = prepare(rocker_port, desc_info, prepare_priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001316 if (err) {
Scott Feldman179f9a22015-06-12 21:35:46 -07001317 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001318 goto out;
1319 }
Scott Feldmanc4f20322015-05-10 09:47:50 -07001320
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001321 rocker_desc_cookie_ptr_set(desc_info, wait);
Scott Feldmanc4f20322015-05-10 09:47:50 -07001322
Jiri Pirko76c6f942015-09-24 10:02:44 +02001323 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07001324 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1325
Scott Feldman179f9a22015-06-12 21:35:46 -07001326 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1327
1328 if (nowait)
1329 return 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001330
Jiri Pirko76c6f942015-09-24 10:02:44 +02001331 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07001332 if (!rocker_wait_event_timeout(wait, HZ / 10))
1333 return -EIO;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001334
1335 err = rocker_desc_err(desc_info);
1336 if (err)
1337 return err;
1338
1339 if (process)
Simon Horman534ba6a2015-06-01 13:25:04 +09001340 err = process(rocker_port, desc_info, process_priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001341
1342 rocker_desc_gen_clear(desc_info);
1343out:
Jiri Pirko76c6f942015-09-24 10:02:44 +02001344 rocker_wait_destroy(trans, wait);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001345 return err;
1346}
1347
1348static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001349rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001350 struct rocker_desc_info *desc_info,
1351 void *priv)
1352{
1353 struct rocker_tlv *cmd_info;
1354
1355 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1356 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1357 return -EMSGSIZE;
1358 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1359 if (!cmd_info)
1360 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001361 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1362 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001363 return -EMSGSIZE;
1364 rocker_tlv_nest_end(desc_info, cmd_info);
1365 return 0;
1366}
1367
1368static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001369rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001370 const struct rocker_desc_info *desc_info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001371 void *priv)
1372{
1373 struct ethtool_cmd *ecmd = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001374 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1375 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001376 u32 speed;
1377 u8 duplex;
1378 u8 autoneg;
1379
1380 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1381 if (!attrs[ROCKER_TLV_CMD_INFO])
1382 return -EIO;
1383
1384 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1385 attrs[ROCKER_TLV_CMD_INFO]);
1386 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1387 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1388 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1389 return -EIO;
1390
1391 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1392 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1393 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1394
1395 ecmd->transceiver = XCVR_INTERNAL;
1396 ecmd->supported = SUPPORTED_TP;
1397 ecmd->phy_address = 0xff;
1398 ecmd->port = PORT_TP;
1399 ethtool_cmd_speed_set(ecmd, speed);
1400 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1401 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1402
1403 return 0;
1404}
1405
1406static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001407rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001408 const struct rocker_desc_info *desc_info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001409 void *priv)
1410{
1411 unsigned char *macaddr = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001412 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1413 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1414 const struct rocker_tlv *attr;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001415
1416 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1417 if (!attrs[ROCKER_TLV_CMD_INFO])
1418 return -EIO;
1419
1420 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1421 attrs[ROCKER_TLV_CMD_INFO]);
1422 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1423 if (!attr)
1424 return -EIO;
1425
1426 if (rocker_tlv_len(attr) != ETH_ALEN)
1427 return -EINVAL;
1428
1429 ether_addr_copy(macaddr, rocker_tlv_data(attr));
1430 return 0;
1431}
1432
Jiri Pirkoe1ba3de2016-02-16 15:14:43 +01001433static int
1434rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
1435 const struct rocker_desc_info *desc_info,
1436 void *priv)
1437{
1438 u8 *p_mode = priv;
1439 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1440 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1441 const struct rocker_tlv *attr;
1442
1443 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1444 if (!attrs[ROCKER_TLV_CMD_INFO])
1445 return -EIO;
1446
1447 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1448 attrs[ROCKER_TLV_CMD_INFO]);
1449 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
1450 if (!attr)
1451 return -EIO;
1452
1453 *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
1454 return 0;
1455}
1456
David Aherndb191702015-03-17 20:23:16 -06001457struct port_name {
1458 char *buf;
1459 size_t len;
1460};
1461
1462static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001463rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001464 const struct rocker_desc_info *desc_info,
David Aherndb191702015-03-17 20:23:16 -06001465 void *priv)
1466{
Simon Hormane5054642015-05-25 14:28:36 +09001467 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1468 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
David Aherndb191702015-03-17 20:23:16 -06001469 struct port_name *name = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001470 const struct rocker_tlv *attr;
David Aherndb191702015-03-17 20:23:16 -06001471 size_t i, j, len;
Simon Hormane5054642015-05-25 14:28:36 +09001472 const char *str;
David Aherndb191702015-03-17 20:23:16 -06001473
1474 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1475 if (!attrs[ROCKER_TLV_CMD_INFO])
1476 return -EIO;
1477
1478 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1479 attrs[ROCKER_TLV_CMD_INFO]);
1480 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1481 if (!attr)
1482 return -EIO;
1483
1484	len = min_t(size_t, rocker_tlv_len(attr), name->len - 1);
1485 str = rocker_tlv_data(attr);
1486
1487 /* make sure name only contains alphanumeric characters */
1488 for (i = j = 0; i < len; ++i) {
1489 if (isalnum(str[i])) {
1490 name->buf[j] = str[i];
1491 j++;
1492 }
1493 }
1494
1495 if (j == 0)
1496 return -EIO;
1497
1498 name->buf[j] = '\0';
1499
1500 return 0;
1501}
1502
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001503static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001504rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001505 struct rocker_desc_info *desc_info,
1506 void *priv)
1507{
1508 struct ethtool_cmd *ecmd = priv;
1509 struct rocker_tlv *cmd_info;
1510
1511 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1512 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1513 return -EMSGSIZE;
1514 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1515 if (!cmd_info)
1516 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001517 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1518 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001519 return -EMSGSIZE;
1520 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1521 ethtool_cmd_speed(ecmd)))
1522 return -EMSGSIZE;
1523 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1524 ecmd->duplex))
1525 return -EMSGSIZE;
1526 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1527 ecmd->autoneg))
1528 return -EMSGSIZE;
1529 rocker_tlv_nest_end(desc_info, cmd_info);
1530 return 0;
1531}
1532
1533static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001534rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001535 struct rocker_desc_info *desc_info,
1536 void *priv)
1537{
Simon Hormane5054642015-05-25 14:28:36 +09001538 const unsigned char *macaddr = priv;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001539 struct rocker_tlv *cmd_info;
1540
1541 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1542 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1543 return -EMSGSIZE;
1544 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1545 if (!cmd_info)
1546 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001547 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1548 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001549 return -EMSGSIZE;
1550 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1551 ETH_ALEN, macaddr))
1552 return -EMSGSIZE;
1553 rocker_tlv_nest_end(desc_info, cmd_info);
1554 return 0;
1555}
1556
Scott Feldman5111f802014-11-28 14:34:30 +01001557static int
Scott Feldman77a58c72015-07-08 16:06:47 -07001558rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1559 struct rocker_desc_info *desc_info,
1560 void *priv)
1561{
1562 int mtu = *(int *)priv;
1563 struct rocker_tlv *cmd_info;
1564
1565 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1566 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1567 return -EMSGSIZE;
1568 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1569 if (!cmd_info)
1570 return -EMSGSIZE;
1571 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1572 rocker_port->pport))
1573 return -EMSGSIZE;
1574 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1575 mtu))
1576 return -EMSGSIZE;
1577 rocker_tlv_nest_end(desc_info, cmd_info);
1578 return 0;
1579}
1580
1581static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001582rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
Scott Feldman5111f802014-11-28 14:34:30 +01001583 struct rocker_desc_info *desc_info,
1584 void *priv)
1585{
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001586 bool learning = *(bool *)priv;
Scott Feldman5111f802014-11-28 14:34:30 +01001587 struct rocker_tlv *cmd_info;
1588
1589 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1590 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1591 return -EMSGSIZE;
1592 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1593 if (!cmd_info)
1594 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001595 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1596 rocker_port->pport))
Scott Feldman5111f802014-11-28 14:34:30 +01001597 return -EMSGSIZE;
1598 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001599 learning))
Scott Feldman5111f802014-11-28 14:34:30 +01001600 return -EMSGSIZE;
1601 rocker_tlv_nest_end(desc_info, cmd_info);
1602 return 0;
1603}
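/* All of the SET_PORT_SETTINGS prep callbacks above emit the same TLV
 * shape and differ only in which attribute they nest inside CMD_INFO.
 * Roughly (a layout sketch, not a wire-format specification):
 *
 *   [ROCKER_TLV_CMD_TYPE]  u16 = ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS
 *   [ROCKER_TLV_CMD_INFO]  nest
 *       [ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]  u32 = rocker_port->pport
 *       [ROCKER_TLV_CMD_PORT_SETTINGS_*]      speed/duplex/autoneg,
 *                                             macaddr, mtu or learning
 */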
1604
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001605static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1606 struct ethtool_cmd *ecmd)
1607{
Jiri Pirko76c6f942015-09-24 10:02:44 +02001608 return rocker_cmd_exec(rocker_port, NULL, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001609 rocker_cmd_get_port_settings_prep, NULL,
1610 rocker_cmd_get_port_settings_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001611 ecmd);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001612}
1613
1614static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1615 unsigned char *macaddr)
1616{
Jiri Pirko76c6f942015-09-24 10:02:44 +02001617 return rocker_cmd_exec(rocker_port, NULL, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001618 rocker_cmd_get_port_settings_prep, NULL,
1619 rocker_cmd_get_port_settings_macaddr_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001620 macaddr);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001621}
1622
Jiri Pirkoe1ba3de2016-02-16 15:14:43 +01001623static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
1624 u8 *p_mode)
1625{
1626 return rocker_cmd_exec(rocker_port, NULL, 0,
1627 rocker_cmd_get_port_settings_prep, NULL,
1628 rocker_cmd_get_port_settings_mode_proc, p_mode);
1629}
1630
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001631static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1632 struct ethtool_cmd *ecmd)
1633{
Jiri Pirko76c6f942015-09-24 10:02:44 +02001634 return rocker_cmd_exec(rocker_port, NULL, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001635 rocker_cmd_set_port_settings_ethtool_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001636 ecmd, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001637}
1638
1639static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1640 unsigned char *macaddr)
1641{
Jiri Pirko76c6f942015-09-24 10:02:44 +02001642 return rocker_cmd_exec(rocker_port, NULL, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001643 rocker_cmd_set_port_settings_macaddr_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001644 macaddr, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001645}
1646
Scott Feldman77a58c72015-07-08 16:06:47 -07001647static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1648 int mtu)
1649{
Jiri Pirko76c6f942015-09-24 10:02:44 +02001650 return rocker_cmd_exec(rocker_port, NULL, 0,
Scott Feldman77a58c72015-07-08 16:06:47 -07001651 rocker_cmd_set_port_settings_mtu_prep,
1652 &mtu, NULL, NULL);
1653}
1654
Scott Feldmanc4f20322015-05-10 09:47:50 -07001655static int rocker_port_set_learning(struct rocker_port *rocker_port,
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001656 struct switchdev_trans *trans,
1657 bool learning)
Scott Feldman5111f802014-11-28 14:34:30 +01001658{
Jiri Pirko76c6f942015-09-24 10:02:44 +02001659 return rocker_cmd_exec(rocker_port, trans, 0,
Scott Feldman5111f802014-11-28 14:34:30 +01001660 rocker_cmd_set_port_learning_prep,
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01001661 &learning, NULL, NULL);
Scott Feldman5111f802014-11-28 14:34:30 +01001662}
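/* The getters and setters above all follow one pattern: a "prep"
 * callback builds the request TLVs, an optional "proc" callback parses
 * the completion, and rocker_cmd_exec() runs the exchange.  Below is a
 * hedged sketch (not part of the driver) of how a getter for the port
 * MTU could be wired up the same way; the _proc function and the
 * getter itself are hypothetical, while the TLV types and
 * rocker_tlv_get_u16() are assumed to come from rocker_tlv.h.
 */
#if 0
static int
rocker_cmd_get_port_settings_mtu_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	u16 *p_mtu = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MTU])
		return -EIO;

	*p_mtu = rocker_tlv_get_u16(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MTU]);
	return 0;
}

static int rocker_cmd_get_port_settings_mtu(struct rocker_port *rocker_port,
					    u16 *p_mtu)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_mtu_proc, p_mtu);
}
#endif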
1663
Jiri Pirkoe4201142016-02-16 15:14:45 +01001664/**********************
1665 * Worlds manipulation
1666 **********************/
1667
1668static struct rocker_world_ops *rocker_world_ops[] = {
1669 &rocker_ofdpa_ops,
1670};
1671
1672#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)
1673
1674static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
1675{
1676 int i;
1677
1678 for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
1679 if (rocker_world_ops[i]->mode == mode)
1680 return rocker_world_ops[i];
1681 return NULL;
1682}
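/* rocker_world_ops[] is the registry of supported port modes
 * ("worlds") and rocker_world_ops_find() maps the mode byte reported
 * by the device onto one of them.  A hedged sketch of what hooking in
 * a second world would look like -- rocker_foo_ops is purely
 * hypothetical, only rocker_ofdpa_ops exists today:
 */
#if 0
static struct rocker_world_ops *rocker_world_ops[] = {
	&rocker_ofdpa_ops,
	&rocker_foo_ops,	/* hypothetical second world */
};
#endif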
1683
1684static int rocker_world_init(struct rocker *rocker, u8 mode)
1685{
1686 struct rocker_world_ops *wops;
1687 int err;
1688
1689 wops = rocker_world_ops_find(mode);
1690 if (!wops) {
1691 dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n",
1692 mode);
1693 return -EINVAL;
1694 }
1695 rocker->wops = wops;
1696 rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
1697 if (!rocker->wpriv)
1698 return -ENOMEM;
1699 if (!wops->init)
1700 return 0;
1701 err = wops->init(rocker);
1702 if (err)
1703 kfree(rocker->wpriv);
1704 return err;
1705}
1706
1707static void rocker_world_fini(struct rocker *rocker)
1708{
1709 struct rocker_world_ops *wops = rocker->wops;
1710
1711 if (!wops || !wops->fini)
1712 return;
1713 wops->fini(rocker);
1714 kfree(rocker->wpriv);
1715}
1716
1717static int rocker_world_check_init(struct rocker_port *rocker_port)
1718{
1719 struct rocker *rocker = rocker_port->rocker;
1720 u8 mode;
1721 int err;
1722
1723 err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
1724 if (err) {
1725 dev_err(&rocker->pdev->dev, "failed to get port mode\n");
1726 return err;
1727 }
1728 if (rocker->wops) {
1729 if (rocker->wops->mode != mode) {
1730 dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
1731			return -EINVAL;
1732 }
1733 return 0;
1734 }
1735 return rocker_world_init(rocker, mode);
1736}
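/* rocker_world_check_init() is meant to run once per port during
 * probe: the first port picks and initializes the world, later ports
 * only verify that they report the same mode.  A hedged sketch of the
 * expected per-port setup order (the actual caller and its error
 * unwinding live outside this excerpt):
 */
#if 0
	err = rocker_world_check_init(rocker_port);
	if (err)
		return err;
	err = rocker_world_port_pre_init(rocker_port);	/* allocates wpriv */
	if (err)
		return err;
	err = rocker_world_port_init(rocker_port);	/* world-specific setup */
	if (err)
		return err;
#endif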
1737
1738static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
1739{
1740 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1741 int err;
1742
1743 rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
1744 if (!rocker_port->wpriv)
1745 return -ENOMEM;
1746 if (!wops->port_pre_init)
1747 return 0;
1748 err = wops->port_pre_init(rocker_port);
1749 if (err)
1750 kfree(rocker_port->wpriv);
1751	return err;
1752}
1753
1754static int rocker_world_port_init(struct rocker_port *rocker_port)
1755{
1756 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1757
1758 if (!wops->port_init)
1759 return 0;
1760 return wops->port_init(rocker_port);
1761}
1762
1763static void rocker_world_port_fini(struct rocker_port *rocker_port)
1764{
1765 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1766
1767 if (!wops->port_fini)
1768 return;
1769 wops->port_fini(rocker_port);
1770}
1771
1772static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
1773{
1774 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1775
1776 if (!wops->port_post_fini)
1777 return;
1778 wops->port_post_fini(rocker_port);
1779 kfree(rocker_port->wpriv);
1780}
1781
1782static int rocker_world_port_open(struct rocker_port *rocker_port)
1783{
1784 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1785
1786 if (!wops->port_open)
1787 return 0;
1788 return wops->port_open(rocker_port);
1789}
1790
1791static void rocker_world_port_stop(struct rocker_port *rocker_port)
1792{
1793 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1794
1795 if (!wops->port_stop)
1796 return;
1797 wops->port_stop(rocker_port);
1798}
1799
1800static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
1801 u8 state,
1802 struct switchdev_trans *trans)
1803{
1804 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1805
1806 if (!wops->port_attr_stp_state_set)
1807 return 0;
1808 return wops->port_attr_stp_state_set(rocker_port, state, trans);
1809}
1810
1811static int
1812rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
1813 unsigned long brport_flags,
1814 struct switchdev_trans *trans)
1815{
1816 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1817
1818 if (!wops->port_attr_bridge_flags_set)
1819 return 0;
1820 return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
1821 trans);
1822}
1823
1824static int
1825rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
1826 unsigned long *p_brport_flags)
1827{
1828 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1829
1830 if (!wops->port_attr_bridge_flags_get)
1831 return 0;
1832 return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);
1833}
1834
1835static int
1836rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
1837 u32 ageing_time,
1838 struct switchdev_trans *trans)
1839{
1841 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1842
1843 if (!wops->port_attr_bridge_ageing_time_set)
1844 return 0;
1845 return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
1846 trans);
1847}
1848
1849static int
1850rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
1851 const struct switchdev_obj_port_vlan *vlan,
1852 struct switchdev_trans *trans)
1853{
1854 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1855
1856 if (!wops->port_obj_vlan_add)
1857 return 0;
1858 return wops->port_obj_vlan_add(rocker_port, vlan, trans);
1859}
1860
1861static int
1862rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
1863 const struct switchdev_obj_port_vlan *vlan)
1864{
1865 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1866
1867 if (!wops->port_obj_vlan_del)
1868 return 0;
1869 return wops->port_obj_vlan_del(rocker_port, vlan);
1870}
1871
1872static int
1873rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
1874 struct switchdev_obj_port_vlan *vlan,
1875 switchdev_obj_dump_cb_t *cb)
1876{
1877 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1878
1879 if (!wops->port_obj_vlan_dump)
1880 return 0;
1881 return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
1882}
1883
1884static int
1885rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
1886 const struct switchdev_obj_ipv4_fib *fib4,
1887 struct switchdev_trans *trans)
1888{
1889 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1890
1891 if (!wops->port_obj_fib4_add)
1892 return 0;
1893 return wops->port_obj_fib4_add(rocker_port, fib4, trans);
1894}
1895
1896static int
1897rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
1898 const struct switchdev_obj_ipv4_fib *fib4)
1899{
1900 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1901
1902 if (!wops->port_obj_fib4_del)
1903 return 0;
1904 return wops->port_obj_fib4_del(rocker_port, fib4);
1905}
1906
1907static int
1908rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
1909 const struct switchdev_obj_port_fdb *fdb,
1910 struct switchdev_trans *trans)
1911{
1912 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1913
1914 if (!wops->port_obj_fdb_add)
1915 return 0;
1916 return wops->port_obj_fdb_add(rocker_port, fdb, trans);
1917}
1918
1919static int
1920rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
1921 const struct switchdev_obj_port_fdb *fdb)
1922{
1923 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1924
1925 if (!wops->port_obj_fdb_del)
1926 return 0;
1927 return wops->port_obj_fdb_del(rocker_port, fdb);
1928}
1929
1930static int
1931rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
1932 struct switchdev_obj_port_fdb *fdb,
1933 switchdev_obj_dump_cb_t *cb)
1934{
1935 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1936
1937 if (!wops->port_obj_fdb_dump)
1938 return 0;
1939 return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
1940}
1941
1942static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
1943 struct net_device *master)
1944{
1945 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1946
1947 if (!wops->port_master_linked)
1948 return 0;
1949 return wops->port_master_linked(rocker_port, master);
1950}
1951
1952static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
1953 struct net_device *master)
1954{
1955 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1956
1957 if (!wops->port_master_unlinked)
1958 return 0;
1959 return wops->port_master_unlinked(rocker_port, master);
1960}
1961
1962static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
1963 struct neighbour *n)
1964{
1965 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1966
1967 if (!wops->port_neigh_update)
1968 return 0;
1969 return wops->port_neigh_update(rocker_port, n);
1970}
1971
1972static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
1973 struct neighbour *n)
1974{
1975 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1976
1977 if (!wops->port_neigh_destroy)
1978 return 0;
1979 return wops->port_neigh_destroy(rocker_port, n);
1980}
1981
1982static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
1983 const unsigned char *addr,
1984 __be16 vlan_id)
1985{
1986 struct rocker_world_ops *wops = rocker_port->rocker->wops;
1987
1988 if (!wops->port_ev_mac_vlan_seen)
1989 return 0;
1990 return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
1991}
1992
Simon Hormane5054642015-05-25 14:28:36 +09001993static int
1994rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1995 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001996{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001997 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1998 entry->key.ig_port.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001999 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002000 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2001 entry->key.ig_port.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002002 return -EMSGSIZE;
2003 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2004 entry->key.ig_port.goto_tbl))
2005 return -EMSGSIZE;
2006
2007 return 0;
2008}
2009
Simon Hormane5054642015-05-25 14:28:36 +09002010static int
2011rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
2012 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002013{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002014 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2015 entry->key.vlan.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002016 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002017 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2018 entry->key.vlan.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002019 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002020 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2021 entry->key.vlan.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002022 return -EMSGSIZE;
2023 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2024 entry->key.vlan.goto_tbl))
2025 return -EMSGSIZE;
2026 if (entry->key.vlan.untagged &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002027 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
2028 entry->key.vlan.new_vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002029 return -EMSGSIZE;
2030
2031 return 0;
2032}
2033
Simon Hormane5054642015-05-25 14:28:36 +09002034static int
2035rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
2036 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002037{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002038 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2039 entry->key.term_mac.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002040 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002041 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2042 entry->key.term_mac.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002043 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002044 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2045 entry->key.term_mac.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002046 return -EMSGSIZE;
2047 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2048 ETH_ALEN, entry->key.term_mac.eth_dst))
2049 return -EMSGSIZE;
2050 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2051 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
2052 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002053 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2054 entry->key.term_mac.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002055 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002056 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2057 entry->key.term_mac.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002058 return -EMSGSIZE;
2059 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2060 entry->key.term_mac.goto_tbl))
2061 return -EMSGSIZE;
2062 if (entry->key.term_mac.copy_to_cpu &&
2063 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2064 entry->key.term_mac.copy_to_cpu))
2065 return -EMSGSIZE;
2066
2067 return 0;
2068}
2069
2070static int
2071rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002072 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002073{
Jiri Pirko9b03c712014-12-03 14:14:53 +01002074 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2075 entry->key.ucast_routing.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002076 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002077 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2078 entry->key.ucast_routing.dst4))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002079 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002080 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2081 entry->key.ucast_routing.dst4_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002082 return -EMSGSIZE;
2083 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2084 entry->key.ucast_routing.goto_tbl))
2085 return -EMSGSIZE;
2086 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2087 entry->key.ucast_routing.group_id))
2088 return -EMSGSIZE;
2089
2090 return 0;
2091}
2092
Simon Hormane5054642015-05-25 14:28:36 +09002093static int
2094rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2095 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002096{
2097 if (entry->key.bridge.has_eth_dst &&
2098 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2099 ETH_ALEN, entry->key.bridge.eth_dst))
2100 return -EMSGSIZE;
2101 if (entry->key.bridge.has_eth_dst_mask &&
2102 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2103 ETH_ALEN, entry->key.bridge.eth_dst_mask))
2104 return -EMSGSIZE;
2105 if (entry->key.bridge.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002106 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2107 entry->key.bridge.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002108 return -EMSGSIZE;
2109 if (entry->key.bridge.tunnel_id &&
2110 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2111 entry->key.bridge.tunnel_id))
2112 return -EMSGSIZE;
2113 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2114 entry->key.bridge.goto_tbl))
2115 return -EMSGSIZE;
2116 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2117 entry->key.bridge.group_id))
2118 return -EMSGSIZE;
2119 if (entry->key.bridge.copy_to_cpu &&
2120 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2121 entry->key.bridge.copy_to_cpu))
2122 return -EMSGSIZE;
2123
2124 return 0;
2125}
2126
Simon Hormane5054642015-05-25 14:28:36 +09002127static int
2128rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2129 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002130{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002131 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2132 entry->key.acl.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002133 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002134 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2135 entry->key.acl.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002136 return -EMSGSIZE;
2137 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2138 ETH_ALEN, entry->key.acl.eth_src))
2139 return -EMSGSIZE;
2140 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2141 ETH_ALEN, entry->key.acl.eth_src_mask))
2142 return -EMSGSIZE;
2143 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2144 ETH_ALEN, entry->key.acl.eth_dst))
2145 return -EMSGSIZE;
2146 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2147 ETH_ALEN, entry->key.acl.eth_dst_mask))
2148 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002149 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2150 entry->key.acl.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002151 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002152 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2153 entry->key.acl.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002154 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002155 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2156 entry->key.acl.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002157 return -EMSGSIZE;
2158
2159 switch (ntohs(entry->key.acl.eth_type)) {
2160 case ETH_P_IP:
2161 case ETH_P_IPV6:
2162 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2163 entry->key.acl.ip_proto))
2164 return -EMSGSIZE;
2165 if (rocker_tlv_put_u8(desc_info,
2166 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2167 entry->key.acl.ip_proto_mask))
2168 return -EMSGSIZE;
2169 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2170 entry->key.acl.ip_tos & 0x3f))
2171 return -EMSGSIZE;
2172 if (rocker_tlv_put_u8(desc_info,
2173 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2174 entry->key.acl.ip_tos_mask & 0x3f))
2175 return -EMSGSIZE;
2176 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2177 (entry->key.acl.ip_tos & 0xc0) >> 6))
2178 return -EMSGSIZE;
2179 if (rocker_tlv_put_u8(desc_info,
2180 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2181 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
2182 return -EMSGSIZE;
2183 break;
2184 }
2185
2186 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2187 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2188 entry->key.acl.group_id))
2189 return -EMSGSIZE;
2190
2191 return 0;
2192}
2193
Simon Horman534ba6a2015-06-01 13:25:04 +09002194static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002195 struct rocker_desc_info *desc_info,
2196 void *priv)
2197{
Simon Hormane5054642015-05-25 14:28:36 +09002198 const struct rocker_flow_tbl_entry *entry = priv;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002199 struct rocker_tlv *cmd_info;
2200 int err = 0;
2201
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002202 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002203 return -EMSGSIZE;
2204 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2205 if (!cmd_info)
2206 return -EMSGSIZE;
2207 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2208 entry->key.tbl_id))
2209 return -EMSGSIZE;
2210 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2211 entry->key.priority))
2212 return -EMSGSIZE;
2213 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2214 return -EMSGSIZE;
2215 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2216 entry->cookie))
2217 return -EMSGSIZE;
2218
2219 switch (entry->key.tbl_id) {
2220 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2221 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2222 break;
2223 case ROCKER_OF_DPA_TABLE_ID_VLAN:
2224 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2225 break;
2226 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2227 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2228 break;
2229 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2230 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2231 break;
2232 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2233 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2234 break;
2235 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2236 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2237 break;
2238 default:
2239 err = -ENOTSUPP;
2240 break;
2241 }
2242
2243 if (err)
2244 return err;
2245
2246 rocker_tlv_nest_end(desc_info, cmd_info);
2247
2248 return 0;
2249}
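/* The tbl_id switch above covers the OF-DPA flow tables this driver
 * programs.  Pieced together from the goto_tbl defaults used further
 * down, a packet is expected to walk the pipeline roughly as:
 *
 *   INGRESS_PORT -> VLAN -> TERMINATION_MAC ->
 *       { UNICAST_ROUTING | MULTICAST_ROUTING | BRIDGING } ->
 *           ACL_POLICY -> group tables
 */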
2250
Simon Horman534ba6a2015-06-01 13:25:04 +09002251static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002252 struct rocker_desc_info *desc_info,
2253 void *priv)
2254{
2255 const struct rocker_flow_tbl_entry *entry = priv;
2256 struct rocker_tlv *cmd_info;
2257
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002258 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002259 return -EMSGSIZE;
2260 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2261 if (!cmd_info)
2262 return -EMSGSIZE;
2263 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2264 entry->cookie))
2265 return -EMSGSIZE;
2266 rocker_tlv_nest_end(desc_info, cmd_info);
2267
2268 return 0;
2269}
2270
2271static int
2272rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2273 struct rocker_group_tbl_entry *entry)
2274{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002275 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002276 ROCKER_GROUP_PORT_GET(entry->group_id)))
2277 return -EMSGSIZE;
2278 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2279 entry->l2_interface.pop_vlan))
2280 return -EMSGSIZE;
2281
2282 return 0;
2283}
2284
2285static int
2286rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002287 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002288{
2289 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2290 entry->l2_rewrite.group_id))
2291 return -EMSGSIZE;
2292 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2293 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2294 ETH_ALEN, entry->l2_rewrite.eth_src))
2295 return -EMSGSIZE;
2296 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2297 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2298 ETH_ALEN, entry->l2_rewrite.eth_dst))
2299 return -EMSGSIZE;
2300 if (entry->l2_rewrite.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002301 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2302 entry->l2_rewrite.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002303 return -EMSGSIZE;
2304
2305 return 0;
2306}
2307
2308static int
2309rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002310 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002311{
2312 int i;
2313 struct rocker_tlv *group_ids;
2314
2315 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2316 entry->group_count))
2317 return -EMSGSIZE;
2318
2319 group_ids = rocker_tlv_nest_start(desc_info,
2320 ROCKER_TLV_OF_DPA_GROUP_IDS);
2321 if (!group_ids)
2322 return -EMSGSIZE;
2323
2324 for (i = 0; i < entry->group_count; i++)
2325 /* Note TLV array is 1-based */
2326 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2327 return -EMSGSIZE;
2328
2329 rocker_tlv_nest_end(desc_info, group_ids);
2330
2331 return 0;
2332}
2333
2334static int
2335rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002336 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002337{
2338 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2339 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2340 ETH_ALEN, entry->l3_unicast.eth_src))
2341 return -EMSGSIZE;
2342 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2343 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2344 ETH_ALEN, entry->l3_unicast.eth_dst))
2345 return -EMSGSIZE;
2346 if (entry->l3_unicast.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002347 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2348 entry->l3_unicast.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002349 return -EMSGSIZE;
2350 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2351 entry->l3_unicast.ttl_check))
2352 return -EMSGSIZE;
2353 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2354 entry->l3_unicast.group_id))
2355 return -EMSGSIZE;
2356
2357 return 0;
2358}
2359
Simon Horman534ba6a2015-06-01 13:25:04 +09002360static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002361 struct rocker_desc_info *desc_info,
2362 void *priv)
2363{
2364 struct rocker_group_tbl_entry *entry = priv;
2365 struct rocker_tlv *cmd_info;
2366 int err = 0;
2367
2368 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2369 return -EMSGSIZE;
2370 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2371 if (!cmd_info)
2372 return -EMSGSIZE;
2373
2374 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2375 entry->group_id))
2376 return -EMSGSIZE;
2377
2378 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2379 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2380 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2381 break;
2382 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2383 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2384 break;
2385 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2386 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2387 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2388 break;
2389 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2390 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2391 break;
2392 default:
2393 err = -ENOTSUPP;
2394 break;
2395 }
2396
2397 if (err)
2398 return err;
2399
2400 rocker_tlv_nest_end(desc_info, cmd_info);
2401
2402 return 0;
2403}
2404
Simon Horman534ba6a2015-06-01 13:25:04 +09002405static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002406 struct rocker_desc_info *desc_info,
2407 void *priv)
2408{
2409 const struct rocker_group_tbl_entry *entry = priv;
2410 struct rocker_tlv *cmd_info;
2411
2412 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2413 return -EMSGSIZE;
2414 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2415 if (!cmd_info)
2416 return -EMSGSIZE;
2417 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2418 entry->group_id))
2419 return -EMSGSIZE;
2420 rocker_tlv_nest_end(desc_info, cmd_info);
2421
2422 return 0;
2423}
2424
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002425/***************************************************
2426 * Flow, group, FDB, internal VLAN and neigh tables
2427 ***************************************************/
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002428
2429static int rocker_init_tbls(struct rocker *rocker)
2430{
2431 hash_init(rocker->flow_tbl);
2432 spin_lock_init(&rocker->flow_tbl_lock);
2433
2434 hash_init(rocker->group_tbl);
2435 spin_lock_init(&rocker->group_tbl_lock);
2436
2437 hash_init(rocker->fdb_tbl);
2438 spin_lock_init(&rocker->fdb_tbl_lock);
2439
2440 hash_init(rocker->internal_vlan_tbl);
2441 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2442
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002443 hash_init(rocker->neigh_tbl);
2444 spin_lock_init(&rocker->neigh_tbl_lock);
2445
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002446 return 0;
2447}
2448
2449static void rocker_free_tbls(struct rocker *rocker)
2450{
2451 unsigned long flags;
2452 struct rocker_flow_tbl_entry *flow_entry;
2453 struct rocker_group_tbl_entry *group_entry;
2454 struct rocker_fdb_tbl_entry *fdb_entry;
2455 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002456 struct rocker_neigh_tbl_entry *neigh_entry;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002457 struct hlist_node *tmp;
2458 int bkt;
2459
2460 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2461 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2462 hash_del(&flow_entry->entry);
2463 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2464
2465 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2466 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2467 hash_del(&group_entry->entry);
2468 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2469
2470 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2471 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2472 hash_del(&fdb_entry->entry);
2473 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2474
2475 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2476 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2477 tmp, internal_vlan_entry, entry)
2478 hash_del(&internal_vlan_entry->entry);
2479 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002480
2481 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2482 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2483 hash_del(&neigh_entry->entry);
2484 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002485}
2486
2487static struct rocker_flow_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002488rocker_flow_tbl_find(const struct rocker *rocker,
2489 const struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002490{
2491 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002492 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002493
2494 hash_for_each_possible(rocker->flow_tbl, found,
2495 entry, match->key_crc32) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002496 if (memcmp(&found->key, &match->key, key_len) == 0)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002497 return found;
2498 }
2499
2500 return NULL;
2501}
2502
2503static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002504 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002505 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002506{
2507 struct rocker *rocker = rocker_port->rocker;
2508 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002509 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002510 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002511
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002512 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002513
Scott Feldman179f9a22015-06-12 21:35:46 -07002514 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002515
2516 found = rocker_flow_tbl_find(rocker, match);
2517
2518 if (found) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002519 match->cookie = found->cookie;
Jiri Pirko76c6f942015-09-24 10:02:44 +02002520 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002521 hash_del(&found->entry);
Jiri Pirkob15edf82016-02-16 15:14:39 +01002522 rocker_kfree(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002523 found = match;
2524 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002525 } else {
2526 found = match;
2527 found->cookie = rocker->flow_tbl_next_cookie++;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002528 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002529 }
2530
Jiri Pirko76c6f942015-09-24 10:02:44 +02002531 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002532 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002533
Scott Feldman179f9a22015-06-12 21:35:46 -07002534 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002535
Jiri Pirko76c6f942015-09-24 10:02:44 +02002536 return rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07002537 rocker_cmd_flow_tbl_add, found, NULL, NULL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002538}
2539
2540static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002541 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002542 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002543{
2544 struct rocker *rocker = rocker_port->rocker;
2545 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002546 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002547 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002548 int err = 0;
2549
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002550 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002551
Scott Feldman179f9a22015-06-12 21:35:46 -07002552 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002553
2554 found = rocker_flow_tbl_find(rocker, match);
2555
2556 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002557 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002558 hash_del(&found->entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002559 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002560 }
2561
Scott Feldman179f9a22015-06-12 21:35:46 -07002562 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002563
Jiri Pirkob15edf82016-02-16 15:14:39 +01002564 rocker_kfree(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002565
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002566 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002567 err = rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002568 rocker_cmd_flow_tbl_del,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002569 found, NULL, NULL);
Jiri Pirkob15edf82016-02-16 15:14:39 +01002570 rocker_kfree(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002571 }
2572
2573 return err;
2574}
2575
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002576static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002577 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002578 struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002579{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002580 if (flags & ROCKER_OP_FLAG_REMOVE)
Jiri Pirko76c6f942015-09-24 10:02:44 +02002581 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002582 else
Jiri Pirko76c6f942015-09-24 10:02:44 +02002583 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002584}
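/* rocker_flow_tbl_do() is the single entry point used by the helpers
 * below: ROCKER_OP_FLAG_REMOVE selects deletion, everything else is an
 * add.  An add whose key matches an existing entry reuses that entry's
 * cookie and is sent as OF_DPA_FLOW_MOD rather than OF_DPA_FLOW_ADD,
 * and during the switchdev prepare phase the software hash table is
 * left untouched.
 */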
2585
2586static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002587 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002588 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002589 enum rocker_of_dpa_table_id goto_tbl)
2590{
2591 struct rocker_flow_tbl_entry *entry;
2592
Jiri Pirkob15edf82016-02-16 15:14:39 +01002593 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002594 if (!entry)
2595 return -ENOMEM;
2596
2597 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2598 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002599 entry->key.ig_port.in_pport = in_pport;
2600 entry->key.ig_port.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002601 entry->key.ig_port.goto_tbl = goto_tbl;
2602
Jiri Pirko76c6f942015-09-24 10:02:44 +02002603 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002604}
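/* A hedged usage sketch for the helper above: install an ingress-port
 * entry matching one pport exactly and hand matching packets to the
 * VLAN table.  The all-ones mask is illustrative, not taken from this
 * driver.
 */
#if 0
	err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
				      rocker_port->pport, 0xffffffff,
				      ROCKER_OF_DPA_TABLE_ID_VLAN);
#endif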
2605
2606static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002607 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002608 u32 in_pport, __be16 vlan_id,
2609 __be16 vlan_id_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002610 enum rocker_of_dpa_table_id goto_tbl,
2611 bool untagged, __be16 new_vlan_id)
2612{
2613 struct rocker_flow_tbl_entry *entry;
2614
Jiri Pirkob15edf82016-02-16 15:14:39 +01002615 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002616 if (!entry)
2617 return -ENOMEM;
2618
2619 entry->key.priority = ROCKER_PRIORITY_VLAN;
2620 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002621 entry->key.vlan.in_pport = in_pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002622 entry->key.vlan.vlan_id = vlan_id;
2623 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2624 entry->key.vlan.goto_tbl = goto_tbl;
2625
2626 entry->key.vlan.untagged = untagged;
2627 entry->key.vlan.new_vlan_id = new_vlan_id;
2628
Jiri Pirko76c6f942015-09-24 10:02:44 +02002629 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002630}
2631
2632static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002633 struct switchdev_trans *trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002634 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002635 __be16 eth_type, const u8 *eth_dst,
2636 const u8 *eth_dst_mask, __be16 vlan_id,
2637 __be16 vlan_id_mask, bool copy_to_cpu,
2638 int flags)
2639{
2640 struct rocker_flow_tbl_entry *entry;
2641
Jiri Pirkob15edf82016-02-16 15:14:39 +01002642 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002643 if (!entry)
2644 return -ENOMEM;
2645
2646 if (is_multicast_ether_addr(eth_dst)) {
2647 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2648 entry->key.term_mac.goto_tbl =
2649 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2650 } else {
2651 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2652 entry->key.term_mac.goto_tbl =
2653 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2654 }
2655
2656 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002657 entry->key.term_mac.in_pport = in_pport;
2658 entry->key.term_mac.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002659 entry->key.term_mac.eth_type = eth_type;
2660 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2661 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2662 entry->key.term_mac.vlan_id = vlan_id;
2663 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2664 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2665
Jiri Pirko76c6f942015-09-24 10:02:44 +02002666 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002667}
2668
2669static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002670 struct switchdev_trans *trans, int flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002671 const u8 *eth_dst, const u8 *eth_dst_mask,
2672 __be16 vlan_id, u32 tunnel_id,
2673 enum rocker_of_dpa_table_id goto_tbl,
2674 u32 group_id, bool copy_to_cpu)
2675{
2676 struct rocker_flow_tbl_entry *entry;
2677 u32 priority;
2678 bool vlan_bridging = !!vlan_id;
2679	bool dflt = !eth_dst || eth_dst_mask;
2680 bool wild = false;
2681
Jiri Pirkob15edf82016-02-16 15:14:39 +01002682 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002683 if (!entry)
2684 return -ENOMEM;
2685
2686 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2687
2688 if (eth_dst) {
2689 entry->key.bridge.has_eth_dst = 1;
2690 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2691 }
2692 if (eth_dst_mask) {
2693 entry->key.bridge.has_eth_dst_mask = 1;
2694 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
Simon Horman629161f2015-04-30 15:21:29 +09002695 if (!ether_addr_equal(eth_dst_mask, ff_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002696 wild = true;
2697 }
2698
2699 priority = ROCKER_PRIORITY_UNKNOWN;
Thomas Graf51ace882014-11-28 14:34:32 +01002700 if (vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002701 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002702 else if (vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002703 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002704 else if (vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002705 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
Thomas Graf51ace882014-11-28 14:34:32 +01002706 else if (!vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002707 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002708 else if (!vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002709 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002710 else if (!vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002711 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2712
2713 entry->key.priority = priority;
2714 entry->key.bridge.vlan_id = vlan_id;
2715 entry->key.bridge.tunnel_id = tunnel_id;
2716 entry->key.bridge.goto_tbl = goto_tbl;
2717 entry->key.bridge.group_id = group_id;
2718 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2719
Jiri Pirko76c6f942015-09-24 10:02:44 +02002720 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002721}
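/* Bridging priority selection above, summarized:
 *
 *   vlan_bridging  dflt  wild   priority
 *   yes            yes   yes    ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD
 *   yes            yes   no     ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT
 *   yes            no    -      ROCKER_PRIORITY_BRIDGING_VLAN
 *   no             yes   yes    ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD
 *   no             yes   no     ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT
 *   no             no    -      ROCKER_PRIORITY_BRIDGING_TENANT
 *
 * where "dflt" means no exact eth_dst (either no eth_dst at all, or an
 * eth_dst with a mask) and "wild" means the mask is not all-ones.
 */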
2722
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002723static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002724 struct switchdev_trans *trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002725 __be16 eth_type, __be32 dst,
2726 __be32 dst_mask, u32 priority,
2727 enum rocker_of_dpa_table_id goto_tbl,
2728 u32 group_id, int flags)
2729{
2730 struct rocker_flow_tbl_entry *entry;
2731
Jiri Pirkob15edf82016-02-16 15:14:39 +01002732 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002733 if (!entry)
2734 return -ENOMEM;
2735
2736 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2737 entry->key.priority = priority;
2738 entry->key.ucast_routing.eth_type = eth_type;
2739 entry->key.ucast_routing.dst4 = dst;
2740 entry->key.ucast_routing.dst4_mask = dst_mask;
2741 entry->key.ucast_routing.goto_tbl = goto_tbl;
2742 entry->key.ucast_routing.group_id = group_id;
2743 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2744 ucast_routing.group_id);
2745
Jiri Pirko76c6f942015-09-24 10:02:44 +02002746 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002747}
2748
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002749static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002750 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002751 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002752 const u8 *eth_src, const u8 *eth_src_mask,
2753 const u8 *eth_dst, const u8 *eth_dst_mask,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002754 __be16 eth_type, __be16 vlan_id,
2755 __be16 vlan_id_mask, u8 ip_proto,
2756 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002757 u32 group_id)
2758{
2759 u32 priority;
2760 struct rocker_flow_tbl_entry *entry;
2761
Jiri Pirkob15edf82016-02-16 15:14:39 +01002762 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002763 if (!entry)
2764 return -ENOMEM;
2765
2766 priority = ROCKER_PRIORITY_ACL_NORMAL;
2767 if (eth_dst && eth_dst_mask) {
Simon Horman629161f2015-04-30 15:21:29 +09002768 if (ether_addr_equal(eth_dst_mask, mcast_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002769 priority = ROCKER_PRIORITY_ACL_DFLT;
2770 else if (is_link_local_ether_addr(eth_dst))
2771 priority = ROCKER_PRIORITY_ACL_CTRL;
2772 }
2773
2774 entry->key.priority = priority;
2775 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002776 entry->key.acl.in_pport = in_pport;
2777 entry->key.acl.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002778
2779 if (eth_src)
2780 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2781 if (eth_src_mask)
2782 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2783 if (eth_dst)
2784 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2785 if (eth_dst_mask)
2786 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2787
2788 entry->key.acl.eth_type = eth_type;
2789 entry->key.acl.vlan_id = vlan_id;
2790 entry->key.acl.vlan_id_mask = vlan_id_mask;
2791 entry->key.acl.ip_proto = ip_proto;
2792 entry->key.acl.ip_proto_mask = ip_proto_mask;
2793 entry->key.acl.ip_tos = ip_tos;
2794 entry->key.acl.ip_tos_mask = ip_tos_mask;
2795 entry->key.acl.group_id = group_id;
2796
Jiri Pirko76c6f942015-09-24 10:02:44 +02002797 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002798}
2799
2800static struct rocker_group_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002801rocker_group_tbl_find(const struct rocker *rocker,
2802 const struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002803{
2804 struct rocker_group_tbl_entry *found;
2805
2806 hash_for_each_possible(rocker->group_tbl, found,
2807 entry, match->group_id) {
2808 if (found->group_id == match->group_id)
2809 return found;
2810 }
2811
2812 return NULL;
2813}
2814
Jiri Pirko76c6f942015-09-24 10:02:44 +02002815static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002816 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002817{
2818 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2819 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2820 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
Jiri Pirkob15edf82016-02-16 15:14:39 +01002821 rocker_kfree(trans, entry->group_ids);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002822 break;
2823 default:
2824 break;
2825 }
Jiri Pirkob15edf82016-02-16 15:14:39 +01002826 rocker_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002827}
2828
2829static int rocker_group_tbl_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002830 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002831 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002832{
2833 struct rocker *rocker = rocker_port->rocker;
2834 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002835 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002836
Scott Feldman179f9a22015-06-12 21:35:46 -07002837 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002838
2839 found = rocker_group_tbl_find(rocker, match);
2840
2841 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002842 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002843 hash_del(&found->entry);
Jiri Pirko76c6f942015-09-24 10:02:44 +02002844 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002845 found = match;
2846 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2847 } else {
2848 found = match;
2849 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2850 }
2851
Jiri Pirko76c6f942015-09-24 10:02:44 +02002852 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002853 hash_add(rocker->group_tbl, &found->entry, found->group_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002854
Scott Feldman179f9a22015-06-12 21:35:46 -07002855 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002856
Jiri Pirko76c6f942015-09-24 10:02:44 +02002857 return rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07002858 rocker_cmd_group_tbl_add, found, NULL, NULL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002859}
2860
2861static int rocker_group_tbl_del(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002862 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002863 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002864{
2865 struct rocker *rocker = rocker_port->rocker;
2866 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002867 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002868 int err = 0;
2869
Scott Feldman179f9a22015-06-12 21:35:46 -07002870 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002871
2872 found = rocker_group_tbl_find(rocker, match);
2873
2874 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002875 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmanc4f20322015-05-10 09:47:50 -07002876 hash_del(&found->entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002877 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2878 }
2879
Scott Feldman179f9a22015-06-12 21:35:46 -07002880 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002881
Jiri Pirko76c6f942015-09-24 10:02:44 +02002882 rocker_group_tbl_entry_free(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002883
2884 if (found) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02002885 err = rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002886 rocker_cmd_group_tbl_del,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002887 found, NULL, NULL);
Jiri Pirko76c6f942015-09-24 10:02:44 +02002888 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002889 }
2890
2891 return err;
2892}
2893
2894static int rocker_group_tbl_do(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002895 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002896 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002897{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002898 if (flags & ROCKER_OP_FLAG_REMOVE)
Jiri Pirko76c6f942015-09-24 10:02:44 +02002899 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002900 else
Jiri Pirko76c6f942015-09-24 10:02:44 +02002901 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002902}
2903
2904static int rocker_group_l2_interface(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002905 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002906 __be16 vlan_id, u32 out_pport,
2907 int pop_vlan)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002908{
2909 struct rocker_group_tbl_entry *entry;
2910
Jiri Pirkob15edf82016-02-16 15:14:39 +01002911 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002912 if (!entry)
2913 return -ENOMEM;
2914
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002915 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002916 entry->l2_interface.pop_vlan = pop_vlan;
2917
Jiri Pirko76c6f942015-09-24 10:02:44 +02002918 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002919}
2920
2921static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002922 struct switchdev_trans *trans,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002923 int flags, u8 group_count,
Simon Hormane5054642015-05-25 14:28:36 +09002924 const u32 *group_ids, u32 group_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002925{
2926 struct rocker_group_tbl_entry *entry;
2927
Jiri Pirkob15edf82016-02-16 15:14:39 +01002928 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002929 if (!entry)
2930 return -ENOMEM;
2931
2932 entry->group_id = group_id;
2933 entry->group_count = group_count;
2934
Jiri Pirkob15edf82016-02-16 15:14:39 +01002935 entry->group_ids = rocker_kcalloc(trans, flags,
2936 group_count, sizeof(u32));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002937 if (!entry->group_ids) {
Jiri Pirkob15edf82016-02-16 15:14:39 +01002938 rocker_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002939 return -ENOMEM;
2940 }
2941 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2942
Jiri Pirko76c6f942015-09-24 10:02:44 +02002943 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002944}
2945
2946static int rocker_group_l2_flood(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002947 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002948 __be16 vlan_id, u8 group_count,
Simon Hormane5054642015-05-25 14:28:36 +09002949 const u32 *group_ids, u32 group_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002950{
Jiri Pirko76c6f942015-09-24 10:02:44 +02002951 return rocker_group_l2_fan_out(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002952 group_count, group_ids,
2953 group_id);
2954}
2955
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002956static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002957 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09002958 u32 index, const u8 *src_mac, const u8 *dst_mac,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002959 __be16 vlan_id, bool ttl_check, u32 pport)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002960{
2961 struct rocker_group_tbl_entry *entry;
2962
Jiri Pirkob15edf82016-02-16 15:14:39 +01002963 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002964 if (!entry)
2965 return -ENOMEM;
2966
2967 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2968 if (src_mac)
2969 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2970 if (dst_mac)
2971 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2972 entry->l3_unicast.vlan_id = vlan_id;
2973 entry->l3_unicast.ttl_check = ttl_check;
2974 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2975
Jiri Pirko76c6f942015-09-24 10:02:44 +02002976 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002977}
2978
2979static struct rocker_neigh_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002980rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002981{
2982 struct rocker_neigh_tbl_entry *found;
2983
Scott Feldman0f43deb2015-03-06 15:54:51 -08002984 hash_for_each_possible(rocker->neigh_tbl, found,
2985 entry, be32_to_cpu(ip_addr))
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002986 if (found->ip_addr == ip_addr)
2987 return found;
2988
2989 return NULL;
2990}
2991
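/* Neighbour entries are reference counted: the /32 host route installed by
 * rocker_port_ipv4_neigh() and every nexthop added via rocker_port_ipv4_nh()
 * share a single entry per IP address.  The L3 unicast group index is taken
 * from neigh_tbl_next_index outside the commit phase and the same entry is
 * reused on commit, so the counter only advances once; the hash insert and
 * ref-count bump are skipped during the prepare phase.
 */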
2992static void _rocker_neigh_add(struct rocker *rocker,
Jiri Pirko76c6f942015-09-24 10:02:44 +02002993 struct switchdev_trans *trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002994 struct rocker_neigh_tbl_entry *entry)
2995{
Jiri Pirko76c6f942015-09-24 10:02:44 +02002996 if (!switchdev_trans_ph_commit(trans))
Scott Feldman4d81db42015-06-12 21:24:40 -07002997 entry->index = rocker->neigh_tbl_next_index++;
Jiri Pirko76c6f942015-09-24 10:02:44 +02002998 if (switchdev_trans_ph_prepare(trans))
Simon Horman550ecc92015-05-21 12:40:16 +09002999 return;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003000 entry->ref_count++;
Scott Feldman0f43deb2015-03-06 15:54:51 -08003001 hash_add(rocker->neigh_tbl, &entry->entry,
3002 be32_to_cpu(entry->ip_addr));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003003}
3004
Jiri Pirko76c6f942015-09-24 10:02:44 +02003005static void _rocker_neigh_del(struct switchdev_trans *trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003006 struct rocker_neigh_tbl_entry *entry)
3007{
Jiri Pirko76c6f942015-09-24 10:02:44 +02003008 if (switchdev_trans_ph_prepare(trans))
Simon Horman550ecc92015-05-21 12:40:16 +09003009 return;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003010 if (--entry->ref_count == 0) {
3011 hash_del(&entry->entry);
Jiri Pirkob15edf82016-02-16 15:14:39 +01003012 rocker_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003013 }
3014}
3015
Scott Feldmanc4f20322015-05-10 09:47:50 -07003016static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003017 struct switchdev_trans *trans,
Simon Hormane5054642015-05-25 14:28:36 +09003018 const u8 *eth_dst, bool ttl_check)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003019{
3020 if (eth_dst) {
3021 ether_addr_copy(entry->eth_dst, eth_dst);
3022 entry->ttl_check = ttl_check;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003023 } else if (!switchdev_trans_ph_prepare(trans)) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003024 entry->ref_count++;
3025 }
3026}
3027
3028static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003029 struct switchdev_trans *trans,
Simon Hormane5054642015-05-25 14:28:36 +09003030 int flags, __be32 ip_addr, const u8 *eth_dst)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003031{
3032 struct rocker *rocker = rocker_port->rocker;
3033 struct rocker_neigh_tbl_entry *entry;
3034 struct rocker_neigh_tbl_entry *found;
3035 unsigned long lock_flags;
3036 __be16 eth_type = htons(ETH_P_IP);
3037 enum rocker_of_dpa_table_id goto_tbl =
3038 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3039 u32 group_id;
3040 u32 priority = 0;
3041 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3042 bool updating;
3043 bool removing;
3044 int err = 0;
3045
Jiri Pirkob15edf82016-02-16 15:14:39 +01003046 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003047 if (!entry)
3048 return -ENOMEM;
3049
3050 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3051
3052 found = rocker_neigh_tbl_find(rocker, ip_addr);
3053
3054 updating = found && adding;
3055 removing = found && !adding;
3056 adding = !found && adding;
3057
3058 if (adding) {
3059 entry->ip_addr = ip_addr;
3060 entry->dev = rocker_port->dev;
3061 ether_addr_copy(entry->eth_dst, eth_dst);
3062 entry->ttl_check = true;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003063 _rocker_neigh_add(rocker, trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003064 } else if (removing) {
3065 memcpy(entry, found, sizeof(*entry));
Jiri Pirko76c6f942015-09-24 10:02:44 +02003066 _rocker_neigh_del(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003067 } else if (updating) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003068 _rocker_neigh_update(found, trans, eth_dst, true);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003069 memcpy(entry, found, sizeof(*entry));
3070 } else {
3071 err = -ENOENT;
3072 }
3073
3074 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3075
3076 if (err)
3077 goto err_out;
3078
3079 /* For each active neighbor, we have an L3 unicast group and
3080 * a /32 route to the neighbor, which uses the L3 unicast
3081 * group. The L3 unicast group can also be referred to by
3082 * other routes' nexthops.
3083 */
3084
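	/* Illustrative example (hypothetical values): once 10.0.0.2 resolves
	 * on a port with pport 1 and internal VLAN 0x100, the entries become:
	 *
	 *   L3 unicast group ROCKER_GROUP_L3_UNICAST(index): rewrite src/dst
	 *       MAC, TTL check, chain to ROCKER_GROUP_L2_INTERFACE(0x100, 1)
	 *   unicast route    10.0.0.2/32 -> ROCKER_GROUP_L3_UNICAST(index)
	 */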
Jiri Pirko76c6f942015-09-24 10:02:44 +02003085 err = rocker_group_l3_unicast(rocker_port, trans, flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003086 entry->index,
3087 rocker_port->dev->dev_addr,
3088 entry->eth_dst,
3089 rocker_port->internal_vlan_id,
3090 entry->ttl_check,
3091 rocker_port->pport);
3092 if (err) {
3093 netdev_err(rocker_port->dev,
3094 "Error (%d) L3 unicast group index %d\n",
3095 err, entry->index);
3096 goto err_out;
3097 }
3098
3099 if (adding || removing) {
3100 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003101 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003102 eth_type, ip_addr,
3103 inet_make_mask(32),
3104 priority, goto_tbl,
3105 group_id, flags);
3106
3107 if (err)
3108 netdev_err(rocker_port->dev,
3109 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3110 err, &entry->ip_addr, group_id);
3111 }
3112
3113err_out:
3114 if (!adding)
Jiri Pirkob15edf82016-02-16 15:14:39 +01003115 rocker_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003116
3117 return err;
3118}
3119
3120static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003121 struct switchdev_trans *trans,
3122 __be32 ip_addr)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003123{
3124 struct net_device *dev = rocker_port->dev;
Scott Feldman0f43deb2015-03-06 15:54:51 -08003125 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003126 int err = 0;
3127
Ying Xue4133fc02015-05-15 12:53:21 +08003128 if (!n) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003129 n = neigh_create(&arp_tbl, &ip_addr, dev);
Ying Xue4133fc02015-05-15 12:53:21 +08003130 if (IS_ERR(n))
 3131			return PTR_ERR(n);
3132 }
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003133
3134 /* If the neigh is already resolved, then go ahead and
3135 * install the entry, otherwise start the ARP process to
3136 * resolve the neigh.
3137 */
3138
3139 if (n->nud_state & NUD_VALID)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003140 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003141 ip_addr, n->ha);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003142 else
3143 neigh_event_send(n, NULL);
3144
Ying Xue4133fc02015-05-15 12:53:21 +08003145 neigh_release(n);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003146 return err;
3147}
3148
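/* Resolve a nexthop for a route: *index returns the L3 unicast group index
 * the route should point at.  If the neighbour is not known yet, the index
 * is still reserved and rocker_port_ipv4_resolve() kicks off ARP; the group
 * and /32 host route are filled in later, when the neighbour update for the
 * learned MAC arrives.
 */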
Scott Feldmanc4f20322015-05-10 09:47:50 -07003149static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003150 struct switchdev_trans *trans, int flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003151 __be32 ip_addr, u32 *index)
3152{
3153 struct rocker *rocker = rocker_port->rocker;
3154 struct rocker_neigh_tbl_entry *entry;
3155 struct rocker_neigh_tbl_entry *found;
3156 unsigned long lock_flags;
3157 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3158 bool updating;
3159 bool removing;
3160 bool resolved = true;
3161 int err = 0;
3162
Jiri Pirkob15edf82016-02-16 15:14:39 +01003163 entry = rocker_kzalloc(trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003164 if (!entry)
3165 return -ENOMEM;
3166
3167 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3168
3169 found = rocker_neigh_tbl_find(rocker, ip_addr);
3170 if (found)
3171 *index = found->index;
3172
3173 updating = found && adding;
3174 removing = found && !adding;
3175 adding = !found && adding;
3176
3177 if (adding) {
3178 entry->ip_addr = ip_addr;
3179 entry->dev = rocker_port->dev;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003180 _rocker_neigh_add(rocker, trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003181 *index = entry->index;
3182 resolved = false;
3183 } else if (removing) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003184 _rocker_neigh_del(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003185 } else if (updating) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003186 _rocker_neigh_update(found, trans, NULL, false);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003187 resolved = !is_zero_ether_addr(found->eth_dst);
3188 } else {
3189 err = -ENOENT;
3190 }
3191
3192 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3193
3194 if (!adding)
Jiri Pirkob15edf82016-02-16 15:14:39 +01003195 rocker_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003196
3197 if (err)
3198 return err;
3199
3200 /* Resolved means neigh ip_addr is resolved to neigh mac. */
3201
3202 if (!resolved)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003203 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003204
3205 return err;
3206}
3207
Scott Feldman6c707942014-11-28 14:34:28 +01003208static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003209 struct switchdev_trans *trans,
Scott Feldman6c707942014-11-28 14:34:28 +01003210 int flags, __be16 vlan_id)
3211{
3212 struct rocker_port *p;
Simon Hormane5054642015-05-25 14:28:36 +09003213 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman6c707942014-11-28 14:34:28 +01003214 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
Scott Feldman04f49fa2015-03-15 23:04:46 -07003215 u32 *group_ids;
Scott Feldman6c707942014-11-28 14:34:28 +01003216 u8 group_count = 0;
Scott Feldman04f49fa2015-03-15 23:04:46 -07003217 int err = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01003218 int i;
3219
Jiri Pirkob15edf82016-02-16 15:14:39 +01003220 group_ids = rocker_kcalloc(trans, flags,
3221 rocker->port_count, sizeof(u32));
Scott Feldman04f49fa2015-03-15 23:04:46 -07003222 if (!group_ids)
3223 return -ENOMEM;
3224
Scott Feldman6c707942014-11-28 14:34:28 +01003225 /* Adjust the flood group for this VLAN. The flood group
3226 * references an L2 interface group for each port in this
3227 * VLAN.
3228 */
3229
3230 for (i = 0; i < rocker->port_count; i++) {
3231 p = rocker->ports[i];
Scott Feldmanbcfd7802015-06-01 11:39:04 -07003232 if (!p)
3233 continue;
Scott Feldman6c707942014-11-28 14:34:28 +01003234 if (!rocker_port_is_bridged(p))
3235 continue;
3236 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3237 group_ids[group_count++] =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003238 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003239 }
3240 }
3241
3242 /* If there are no bridged ports in this VLAN, we're done */
3243 if (group_count == 0)
Scott Feldman04f49fa2015-03-15 23:04:46 -07003244 goto no_ports_in_vlan;
Scott Feldman6c707942014-11-28 14:34:28 +01003245
Jiri Pirko76c6f942015-09-24 10:02:44 +02003246 err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003247 group_count, group_ids, group_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003248 if (err)
3249 netdev_err(rocker_port->dev,
3250 "Error (%d) port VLAN l2 flood group\n", err);
3251
Scott Feldman04f49fa2015-03-15 23:04:46 -07003252no_ports_in_vlan:
Jiri Pirkob15edf82016-02-16 15:14:39 +01003253 rocker_kfree(trans, group_ids);
Scott Feldman6c707942014-11-28 14:34:28 +01003254 return err;
3255}
3256
3257static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003258 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003259 __be16 vlan_id, bool pop_vlan)
Scott Feldman6c707942014-11-28 14:34:28 +01003260{
Simon Hormane5054642015-05-25 14:28:36 +09003261 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman6c707942014-11-28 14:34:28 +01003262 struct rocker_port *p;
3263 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003264 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003265 int ref = 0;
3266 int err;
3267 int i;
3268
3269 /* An L2 interface group for this port in this VLAN, but
3270 * only when port STP state is LEARNING|FORWARDING.
3271 */
3272
3273 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3274 rocker_port->stp_state == BR_STATE_FORWARDING) {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003275 out_pport = rocker_port->pport;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003276 err = rocker_group_l2_interface(rocker_port, trans, flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003277 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003278 if (err) {
3279 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003280 "Error (%d) port VLAN l2 group for pport %d\n",
3281 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003282 return err;
3283 }
3284 }
3285
3286 /* An L2 interface group for this VLAN to CPU port.
3287 * Add when first port joins this VLAN and destroy when
3288 * last port leaves this VLAN.
3289 */
3290
3291 for (i = 0; i < rocker->port_count; i++) {
3292 p = rocker->ports[i];
Scott Feldmanbcfd7802015-06-01 11:39:04 -07003293 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
Scott Feldman6c707942014-11-28 14:34:28 +01003294 ref++;
3295 }
3296
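	/* Proceed only when the first port joins the VLAN (adding, ref == 1)
	 * or the last port leaves it (removing, ref == 0); otherwise the CPU
	 * port group is already in the right state.
	 */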
3297 if ((!adding || ref != 1) && (adding || ref != 0))
3298 return 0;
3299
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003300 out_pport = 0;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003301 err = rocker_group_l2_interface(rocker_port, trans, flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003302 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003303 if (err) {
3304 netdev_err(rocker_port->dev,
3305 "Error (%d) port VLAN l2 group for CPU port\n", err);
3306 return err;
3307 }
3308
3309 return 0;
3310}
3311
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003312static struct rocker_ctrl {
3313 const u8 *eth_dst;
3314 const u8 *eth_dst_mask;
Jiri Pirko11e6c652014-12-03 14:14:54 +01003315 __be16 eth_type;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003316 bool acl;
3317 bool bridge;
3318 bool term;
3319 bool copy_to_cpu;
3320} rocker_ctrls[] = {
3321 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3322 /* pass link local multicast pkts up to CPU for filtering */
3323 .eth_dst = ll_mac,
3324 .eth_dst_mask = ll_mask,
3325 .acl = true,
3326 },
3327 [ROCKER_CTRL_LOCAL_ARP] = {
3328 /* pass local ARP pkts up to CPU */
3329 .eth_dst = zero_mac,
3330 .eth_dst_mask = zero_mac,
3331 .eth_type = htons(ETH_P_ARP),
3332 .acl = true,
3333 },
3334 [ROCKER_CTRL_IPV4_MCAST] = {
3335 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3336 .eth_dst = ipv4_mcast,
3337 .eth_dst_mask = ipv4_mask,
3338 .eth_type = htons(ETH_P_IP),
3339 .term = true,
3340 .copy_to_cpu = true,
3341 },
3342 [ROCKER_CTRL_IPV6_MCAST] = {
3343 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3344 .eth_dst = ipv6_mcast,
3345 .eth_dst_mask = ipv6_mask,
3346 .eth_type = htons(ETH_P_IPV6),
3347 .term = true,
3348 .copy_to_cpu = true,
3349 },
3350 [ROCKER_CTRL_DFLT_BRIDGING] = {
3351 /* flood any pkts on vlan */
3352 .bridge = true,
3353 .copy_to_cpu = true,
3354 },
Simon Horman82549732015-07-16 10:39:14 +09003355 [ROCKER_CTRL_DFLT_OVS] = {
3356 /* pass all pkts up to CPU */
3357 .eth_dst = zero_mac,
3358 .eth_dst_mask = zero_mac,
3359 .acl = true,
3360 },
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003361};
3362
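/* Each ctrl policy above is programmed per VLAN through exactly one of the
 * helpers below: .acl entries land in the ACL table, .bridge entries become
 * a bridging/flood entry and .term entries become termination-MAC entries;
 * rocker_port_ctrl_vlan() dispatches on those flags.
 */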
3363static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003364 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003365 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003366{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003367 u32 in_pport = rocker_port->pport;
3368 u32 in_pport_mask = 0xffffffff;
3369 u32 out_pport = 0;
Simon Hormane5054642015-05-25 14:28:36 +09003370 const u8 *eth_src = NULL;
3371 const u8 *eth_src_mask = NULL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003372 __be16 vlan_id_mask = htons(0xffff);
3373 u8 ip_proto = 0;
3374 u8 ip_proto_mask = 0;
3375 u8 ip_tos = 0;
3376 u8 ip_tos_mask = 0;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003377 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003378 int err;
3379
Jiri Pirko76c6f942015-09-24 10:02:44 +02003380 err = rocker_flow_tbl_acl(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003381 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003382 eth_src, eth_src_mask,
3383 ctrl->eth_dst, ctrl->eth_dst_mask,
3384 ctrl->eth_type,
3385 vlan_id, vlan_id_mask,
3386 ip_proto, ip_proto_mask,
3387 ip_tos, ip_tos_mask,
3388 group_id);
3389
3390 if (err)
3391 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3392
3393 return err;
3394}
3395
Scott Feldman6c707942014-11-28 14:34:28 +01003396static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003397 struct switchdev_trans *trans,
3398 int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003399 const struct rocker_ctrl *ctrl,
Scott Feldman6c707942014-11-28 14:34:28 +01003400 __be16 vlan_id)
3401{
3402 enum rocker_of_dpa_table_id goto_tbl =
3403 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3404 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3405 u32 tunnel_id = 0;
3406 int err;
3407
3408 if (!rocker_port_is_bridged(rocker_port))
3409 return 0;
3410
Jiri Pirko76c6f942015-09-24 10:02:44 +02003411 err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003412 ctrl->eth_dst, ctrl->eth_dst_mask,
3413 vlan_id, tunnel_id,
3414 goto_tbl, group_id, ctrl->copy_to_cpu);
3415
3416 if (err)
3417 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3418
3419 return err;
3420}
3421
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003422static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003423 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003424 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003425{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003426 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003427 __be16 vlan_id_mask = htons(0xffff);
3428 int err;
3429
3430 if (ntohs(vlan_id) == 0)
3431 vlan_id = rocker_port->internal_vlan_id;
3432
Jiri Pirko76c6f942015-09-24 10:02:44 +02003433 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003434 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003435 ctrl->eth_type, ctrl->eth_dst,
3436 ctrl->eth_dst_mask, vlan_id,
3437 vlan_id_mask, ctrl->copy_to_cpu,
3438 flags);
3439
3440 if (err)
3441 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3442
3443 return err;
3444}
3445
Scott Feldmanc4f20322015-05-10 09:47:50 -07003446static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003447 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003448 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003449{
3450 if (ctrl->acl)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003451 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003452 ctrl, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003453 if (ctrl->bridge)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003454 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003455 ctrl, vlan_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003456
3457 if (ctrl->term)
Jiri Pirko76c6f942015-09-24 10:02:44 +02003458 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003459 ctrl, vlan_id);
3460
3461 return -EOPNOTSUPP;
3462}
3463
3464static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003465 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003466 __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003467{
3468 int err = 0;
3469 int i;
3470
3471 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3472 if (rocker_port->ctrls[i]) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003473 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003474 &rocker_ctrls[i], vlan_id);
3475 if (err)
3476 return err;
3477 }
3478 }
3479
3480 return err;
3481}
3482
Scott Feldmanc4f20322015-05-10 09:47:50 -07003483static int rocker_port_ctrl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003484 struct switchdev_trans *trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003485 const struct rocker_ctrl *ctrl)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003486{
3487 u16 vid;
3488 int err = 0;
3489
3490 for (vid = 1; vid < VLAN_N_VID; vid++) {
3491 if (!test_bit(vid, rocker_port->vlan_bitmap))
3492 continue;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003493 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003494 ctrl, htons(vid));
3495 if (err)
3496 break;
3497 }
3498
3499 return err;
3500}
3501
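/* Per-port VLAN add/remove path (typically reached from switchdev when a
 * VLAN is added to or deleted from the port, e.g. via "bridge vlan").  It
 * flips the bit in the port's vlan_bitmap, installs the ctrl policies, the
 * L2 interface groups for the port and the CPU port, rebuilds the VLAN's
 * flood group and finally adds the VLAN table entry mapping the wire VLAN
 * to the internal VLAN id.  In the prepare phase the bitmap change is
 * undone at err_out so a dry run leaves no trace.
 */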
Scott Feldmanc4f20322015-05-10 09:47:50 -07003502static int rocker_port_vlan(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003503 struct switchdev_trans *trans, int flags, u16 vid)
Scott Feldman6c707942014-11-28 14:34:28 +01003504{
3505 enum rocker_of_dpa_table_id goto_tbl =
3506 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003507 u32 in_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003508 __be16 vlan_id = htons(vid);
3509 __be16 vlan_id_mask = htons(0xffff);
3510 __be16 internal_vlan_id;
3511 bool untagged;
3512 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3513 int err;
3514
3515 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3516
Scott Feldman9228ad22015-05-10 09:47:54 -07003517 if (adding && test_bit(ntohs(internal_vlan_id),
3518 rocker_port->vlan_bitmap))
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01003519 return 0; /* already added */
Scott Feldman9228ad22015-05-10 09:47:54 -07003520 else if (!adding && !test_bit(ntohs(internal_vlan_id),
3521 rocker_port->vlan_bitmap))
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01003522 return 0; /* already removed */
Scott Feldman6c707942014-11-28 14:34:28 +01003523
Scott Feldman9228ad22015-05-10 09:47:54 -07003524 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3525
Scott Feldman6c707942014-11-28 14:34:28 +01003526 if (adding) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003527 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003528 internal_vlan_id);
3529 if (err) {
3530 netdev_err(rocker_port->dev,
3531 "Error (%d) port ctrl vlan add\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003532 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003533 }
3534 }
3535
Jiri Pirko76c6f942015-09-24 10:02:44 +02003536 err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003537 internal_vlan_id, untagged);
3538 if (err) {
3539 netdev_err(rocker_port->dev,
3540 "Error (%d) port VLAN l2 groups\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003541 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003542 }
3543
Jiri Pirko76c6f942015-09-24 10:02:44 +02003544 err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003545 internal_vlan_id);
3546 if (err) {
3547 netdev_err(rocker_port->dev,
3548 "Error (%d) port VLAN l2 flood group\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003549 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003550 }
3551
Jiri Pirko76c6f942015-09-24 10:02:44 +02003552 err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003553 in_pport, vlan_id, vlan_id_mask,
Scott Feldman6c707942014-11-28 14:34:28 +01003554 goto_tbl, untagged, internal_vlan_id);
3555 if (err)
3556 netdev_err(rocker_port->dev,
3557 "Error (%d) port VLAN table\n", err);
3558
Scott Feldman9228ad22015-05-10 09:47:54 -07003559err_out:
Jiri Pirko76c6f942015-09-24 10:02:44 +02003560 if (switchdev_trans_ph_prepare(trans))
Scott Feldman9228ad22015-05-10 09:47:54 -07003561 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3562
Scott Feldman6c707942014-11-28 14:34:28 +01003563 return err;
3564}
3565
Scott Feldmanc4f20322015-05-10 09:47:50 -07003566static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003567 struct switchdev_trans *trans, int flags)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003568{
3569 enum rocker_of_dpa_table_id goto_tbl;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003570 u32 in_pport;
3571 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003572 int err;
3573
3574 /* Normal Ethernet Frames. Matches pkts from any local physical
3575 * ports. Goto VLAN tbl.
3576 */
3577
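	/* in_pport 0 with mask 0xffff0000 matches any pport whose upper 16
	 * bits are zero, i.e. all front-panel ports, without listing them
	 * individually.
	 */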
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003578 in_pport = 0;
3579 in_pport_mask = 0xffff0000;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003580 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3581
Jiri Pirko76c6f942015-09-24 10:02:44 +02003582 err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003583 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003584 goto_tbl);
3585 if (err)
3586 netdev_err(rocker_port->dev,
3587 "Error (%d) ingress port table entry\n", err);
3588
3589 return err;
3590}
3591
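/* FDB learning is propagated to the bridge asynchronously: each learn or
 * forget is wrapped in a rocker_fdb_learn_work item and the switchdev FDB
 * notifiers run from the workqueue with rtnl held, since the learning
 * events originate in contexts that cannot take rtnl directly.
 */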
Scott Feldman6c707942014-11-28 14:34:28 +01003592struct rocker_fdb_learn_work {
3593 struct work_struct work;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003594 struct rocker_port *rocker_port;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003595 struct switchdev_trans *trans;
Scott Feldman6c707942014-11-28 14:34:28 +01003596 int flags;
3597 u8 addr[ETH_ALEN];
3598 u16 vid;
3599};
3600
3601static void rocker_port_fdb_learn_work(struct work_struct *work)
3602{
Simon Hormane5054642015-05-25 14:28:36 +09003603 const struct rocker_fdb_learn_work *lw =
Scott Feldman6c707942014-11-28 14:34:28 +01003604 container_of(work, struct rocker_fdb_learn_work, work);
3605 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3606 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003607 struct switchdev_notifier_fdb_info info;
Jiri Pirko3aeb6612015-01-15 23:49:37 +01003608
3609 info.addr = lw->addr;
3610 info.vid = lw->vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003611
Ido Schimmel4f2c6ae2016-01-27 15:16:43 +01003612 rtnl_lock();
Thomas Graf51ace882014-11-28 14:34:32 +01003613 if (learned && removing)
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003614 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003615 lw->rocker_port->dev, &info.info);
Thomas Graf51ace882014-11-28 14:34:32 +01003616 else if (learned && !removing)
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003617 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003618 lw->rocker_port->dev, &info.info);
Ido Schimmel4f2c6ae2016-01-27 15:16:43 +01003619 rtnl_unlock();
Scott Feldman6c707942014-11-28 14:34:28 +01003620
Jiri Pirkob15edf82016-02-16 15:14:39 +01003621 rocker_kfree(lw->trans, work);
Scott Feldman6c707942014-11-28 14:34:28 +01003622}
3623
3624static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003625 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003626 const u8 *addr, __be16 vlan_id)
Scott Feldman6c707942014-11-28 14:34:28 +01003627{
3628 struct rocker_fdb_learn_work *lw;
3629 enum rocker_of_dpa_table_id goto_tbl =
3630 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003631 u32 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003632 u32 tunnel_id = 0;
3633 u32 group_id = ROCKER_GROUP_NONE;
Scott Feldman5111f802014-11-28 14:34:30 +01003634 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
Scott Feldman6c707942014-11-28 14:34:28 +01003635 bool copy_to_cpu = false;
3636 int err;
3637
3638 if (rocker_port_is_bridged(rocker_port))
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003639 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003640
3641 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003642 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003643 NULL, vlan_id, tunnel_id, goto_tbl,
Scott Feldman6c707942014-11-28 14:34:28 +01003644 group_id, copy_to_cpu);
3645 if (err)
3646 return err;
3647 }
3648
Scott Feldman5111f802014-11-28 14:34:30 +01003649 if (!syncing)
3650 return 0;
3651
Scott Feldman6c707942014-11-28 14:34:28 +01003652 if (!rocker_port_is_bridged(rocker_port))
3653 return 0;
3654
Jiri Pirkob15edf82016-02-16 15:14:39 +01003655 lw = rocker_kzalloc(trans, flags, sizeof(*lw));
Scott Feldman6c707942014-11-28 14:34:28 +01003656 if (!lw)
3657 return -ENOMEM;
3658
3659 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3660
Scott Feldmanc4f20322015-05-10 09:47:50 -07003661 lw->rocker_port = rocker_port;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003662 lw->trans = trans;
Scott Feldman6c707942014-11-28 14:34:28 +01003663 lw->flags = flags;
3664 ether_addr_copy(lw->addr, addr);
3665 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3666
Jiri Pirko76c6f942015-09-24 10:02:44 +02003667 if (switchdev_trans_ph_prepare(trans))
Jiri Pirkob15edf82016-02-16 15:14:39 +01003668 rocker_kfree(trans, lw);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003669 else
3670 schedule_work(&lw->work);
Scott Feldman6c707942014-11-28 14:34:28 +01003671
3672 return 0;
3673}
3674
3675static struct rocker_fdb_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09003676rocker_fdb_tbl_find(const struct rocker *rocker,
3677 const struct rocker_fdb_tbl_entry *match)
Scott Feldman6c707942014-11-28 14:34:28 +01003678{
3679 struct rocker_fdb_tbl_entry *found;
3680
3681 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3682 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3683 return found;
3684
3685 return NULL;
3686}
3687
3688static int rocker_port_fdb(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003689 struct switchdev_trans *trans,
Scott Feldman6c707942014-11-28 14:34:28 +01003690 const unsigned char *addr,
3691 __be16 vlan_id, int flags)
3692{
3693 struct rocker *rocker = rocker_port->rocker;
3694 struct rocker_fdb_tbl_entry *fdb;
3695 struct rocker_fdb_tbl_entry *found;
3696 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3697 unsigned long lock_flags;
3698
Jiri Pirkob15edf82016-02-16 15:14:39 +01003699 fdb = rocker_kzalloc(trans, flags, sizeof(*fdb));
Scott Feldman6c707942014-11-28 14:34:28 +01003700 if (!fdb)
3701 return -ENOMEM;
3702
3703 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
Scott Feldmana471be42015-09-23 08:39:14 -07003704 fdb->touched = jiffies;
Scott Feldman4c660492015-09-23 08:39:15 -07003705 fdb->key.rocker_port = rocker_port;
Scott Feldman6c707942014-11-28 14:34:28 +01003706 ether_addr_copy(fdb->key.addr, addr);
3707 fdb->key.vlan_id = vlan_id;
3708 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3709
3710 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3711
3712 found = rocker_fdb_tbl_find(rocker, fdb);
3713
Scott Feldmana471be42015-09-23 08:39:14 -07003714 if (found) {
3715 found->touched = jiffies;
3716 if (removing) {
Jiri Pirkob15edf82016-02-16 15:14:39 +01003717 rocker_kfree(trans, fdb);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003718 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmana471be42015-09-23 08:39:14 -07003719 hash_del(&found->entry);
3720 }
3721 } else if (!removing) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003722 if (!switchdev_trans_ph_prepare(trans))
Scott Feldmana471be42015-09-23 08:39:14 -07003723 hash_add(rocker->fdb_tbl, &fdb->entry,
3724 fdb->key_crc32);
Scott Feldman6c707942014-11-28 14:34:28 +01003725 }
3726
3727 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3728
3729 /* Check if adding and already exists, or removing and can't find */
3730 if (!found != !removing) {
Jiri Pirkob15edf82016-02-16 15:14:39 +01003731 rocker_kfree(trans, fdb);
Scott Feldman6c707942014-11-28 14:34:28 +01003732 if (!found && removing)
3733 return 0;
3734 /* Refreshing existing to update aging timers */
3735 flags |= ROCKER_OP_FLAG_REFRESH;
3736 }
3737
Jiri Pirko76c6f942015-09-24 10:02:44 +02003738 return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003739}
3740
Scott Feldmanc4f20322015-05-10 09:47:50 -07003741static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003742 struct switchdev_trans *trans, int flags)
Scott Feldman6c707942014-11-28 14:34:28 +01003743{
3744 struct rocker *rocker = rocker_port->rocker;
3745 struct rocker_fdb_tbl_entry *found;
3746 unsigned long lock_flags;
Scott Feldman6c707942014-11-28 14:34:28 +01003747 struct hlist_node *tmp;
3748 int bkt;
3749 int err = 0;
3750
3751 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3752 rocker_port->stp_state == BR_STATE_FORWARDING)
3753 return 0;
3754
Jiri Pirkod33eeb62015-10-14 19:40:54 +02003755 flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
Scott Feldman179f9a22015-06-12 21:35:46 -07003756
Scott Feldman6c707942014-11-28 14:34:28 +01003757 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3758
3759 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4c660492015-09-23 08:39:15 -07003760 if (found->key.rocker_port != rocker_port)
Scott Feldman6c707942014-11-28 14:34:28 +01003761 continue;
3762 if (!found->learned)
3763 continue;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003764 err = rocker_port_fdb_learn(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003765 found->key.addr,
3766 found->key.vlan_id);
3767 if (err)
3768 goto err_out;
Jiri Pirko76c6f942015-09-24 10:02:44 +02003769 if (!switchdev_trans_ph_prepare(trans))
Simon Horman3098ac32015-05-21 12:40:14 +09003770 hash_del(&found->entry);
Scott Feldman6c707942014-11-28 14:34:28 +01003771 }
3772
3773err_out:
3774 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3775
3776 return err;
3777}
3778
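/* Ageing: the cleanup timer walks the learned FDB entries, expires any whose
 * last-touched time plus the port's ageing_time has passed, and re-arms
 * itself for the earliest remaining expiry, defaulting to BR_MIN_AGEING_TIME
 * ahead when nothing is due sooner.
 */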
Scott Feldman52fe3e22015-09-23 08:39:18 -07003779static void rocker_fdb_cleanup(unsigned long data)
3780{
3781 struct rocker *rocker = (struct rocker *)data;
3782 struct rocker_port *rocker_port;
3783 struct rocker_fdb_tbl_entry *entry;
3784 struct hlist_node *tmp;
3785 unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
3786 unsigned long expires;
3787 unsigned long lock_flags;
3788 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3789 ROCKER_OP_FLAG_LEARNED;
3790 int bkt;
3791
3792 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3793
3794 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3795 if (!entry->learned)
3796 continue;
3797 rocker_port = entry->key.rocker_port;
3798 expires = entry->touched + rocker_port->ageing_time;
3799 if (time_before_eq(expires, jiffies)) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02003800 rocker_port_fdb_learn(rocker_port, NULL,
Scott Feldman52fe3e22015-09-23 08:39:18 -07003801 flags, entry->key.addr,
3802 entry->key.vlan_id);
3803 hash_del(&entry->entry);
3804 } else if (time_before(expires, next_timer)) {
3805 next_timer = expires;
3806 }
3807 }
3808
3809 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3810
3811 mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
3812}
3813
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003814static int rocker_port_router_mac(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003815 struct switchdev_trans *trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003816 __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003817{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003818 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003819 __be16 eth_type;
3820 const u8 *dst_mac_mask = ff_mac;
3821 __be16 vlan_id_mask = htons(0xffff);
3822 bool copy_to_cpu = false;
3823 int err;
3824
3825 if (ntohs(vlan_id) == 0)
3826 vlan_id = rocker_port->internal_vlan_id;
3827
3828 eth_type = htons(ETH_P_IP);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003829 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003830 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003831 eth_type, rocker_port->dev->dev_addr,
3832 dst_mac_mask, vlan_id, vlan_id_mask,
3833 copy_to_cpu, flags);
3834 if (err)
3835 return err;
3836
3837 eth_type = htons(ETH_P_IPV6);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003838 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003839 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003840 eth_type, rocker_port->dev->dev_addr,
3841 dst_mac_mask, vlan_id, vlan_id_mask,
3842 copy_to_cpu, flags);
3843
3844 return err;
3845}
3846
Scott Feldmanc4f20322015-05-10 09:47:50 -07003847static int rocker_port_fwding(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003848 struct switchdev_trans *trans, int flags)
Scott Feldman6c707942014-11-28 14:34:28 +01003849{
3850 bool pop_vlan;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003851 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003852 __be16 vlan_id;
3853 u16 vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003854 int err;
3855
3856 /* Port will be forwarding-enabled if its STP state is LEARNING
3857 * or FORWARDING. Traffic from CPU can still egress, regardless of
3858 * port STP state. Use L2 interface group on port VLANs as a way
3859 * to toggle port forwarding: if forwarding is disabled, L2
3860 * interface group will not exist.
3861 */
3862
3863 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3864 rocker_port->stp_state != BR_STATE_FORWARDING)
3865 flags |= ROCKER_OP_FLAG_REMOVE;
3866
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003867 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003868 for (vid = 1; vid < VLAN_N_VID; vid++) {
3869 if (!test_bit(vid, rocker_port->vlan_bitmap))
3870 continue;
3871 vlan_id = htons(vid);
3872 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003873 err = rocker_group_l2_interface(rocker_port, trans, flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003874 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003875 if (err) {
3876 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003877 "Error (%d) port VLAN l2 group for pport %d\n",
3878 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003879 return err;
3880 }
3881 }
3882
3883 return 0;
3884}
3885
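/* STP state drives which ctrl policies a port wants (link-local traps,
 * ARP/mcast copies, default bridging or OVS flood) and whether its L2
 * interface groups exist.  On a state change the deltas are applied, learned
 * FDB entries are flushed when the port leaves the forwarding states and
 * forwarding is re-evaluated; in the prepare phase the saved ctrls and STP
 * state are restored so the dry run has no side effects.
 */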
Scott Feldmanc4f20322015-05-10 09:47:50 -07003886static int rocker_port_stp_update(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003887 struct switchdev_trans *trans, int flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07003888 u8 state)
Scott Feldman6c707942014-11-28 14:34:28 +01003889{
3890 bool want[ROCKER_CTRL_MAX] = { 0, };
Scott Feldmanc4f20322015-05-10 09:47:50 -07003891 bool prev_ctrls[ROCKER_CTRL_MAX];
Jiri Pirko76c6f942015-09-24 10:02:44 +02003892 u8 uninitialized_var(prev_state);
Scott Feldman6c707942014-11-28 14:34:28 +01003893 int err;
3894 int i;
3895
Jiri Pirko76c6f942015-09-24 10:02:44 +02003896 if (switchdev_trans_ph_prepare(trans)) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003897 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3898 prev_state = rocker_port->stp_state;
3899 }
3900
Scott Feldman6c707942014-11-28 14:34:28 +01003901 if (rocker_port->stp_state == state)
3902 return 0;
3903
3904 rocker_port->stp_state = state;
3905
3906 switch (state) {
3907 case BR_STATE_DISABLED:
3908 /* port is completely disabled */
3909 break;
3910 case BR_STATE_LISTENING:
3911 case BR_STATE_BLOCKING:
3912 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3913 break;
3914 case BR_STATE_LEARNING:
3915 case BR_STATE_FORWARDING:
Simon Horman82549732015-07-16 10:39:14 +09003916 if (!rocker_port_is_ovsed(rocker_port))
3917 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
Scott Feldman6c707942014-11-28 14:34:28 +01003918 want[ROCKER_CTRL_IPV4_MCAST] = true;
3919 want[ROCKER_CTRL_IPV6_MCAST] = true;
3920 if (rocker_port_is_bridged(rocker_port))
3921 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
Simon Horman82549732015-07-16 10:39:14 +09003922 else if (rocker_port_is_ovsed(rocker_port))
3923 want[ROCKER_CTRL_DFLT_OVS] = true;
Scott Feldman6c707942014-11-28 14:34:28 +01003924 else
3925 want[ROCKER_CTRL_LOCAL_ARP] = true;
3926 break;
3927 }
3928
3929 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3930 if (want[i] != rocker_port->ctrls[i]) {
Scott Feldman179f9a22015-06-12 21:35:46 -07003931 int ctrl_flags = flags |
3932 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
Jiri Pirko76c6f942015-09-24 10:02:44 +02003933 err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003934 &rocker_ctrls[i]);
3935 if (err)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003936 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003937 rocker_port->ctrls[i] = want[i];
3938 }
3939 }
3940
Jiri Pirko76c6f942015-09-24 10:02:44 +02003941 err = rocker_port_fdb_flush(rocker_port, trans, flags);
Scott Feldman6c707942014-11-28 14:34:28 +01003942 if (err)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003943 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003944
Jiri Pirko76c6f942015-09-24 10:02:44 +02003945 err = rocker_port_fwding(rocker_port, trans, flags);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003946
3947err_out:
Jiri Pirko76c6f942015-09-24 10:02:44 +02003948 if (switchdev_trans_ph_prepare(trans)) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003949 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3950 rocker_port->stp_state = prev_state;
3951 }
3952
3953 return err;
Scott Feldman6c707942014-11-28 14:34:28 +01003954}
3955
Scott Feldmanc4f20322015-05-10 09:47:50 -07003956static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003957 struct switchdev_trans *trans, int flags)
Scott Feldmane47172a2015-02-25 20:15:38 -08003958{
3959 if (rocker_port_is_bridged(rocker_port))
3960 /* bridge STP will enable port */
3961 return 0;
3962
3963 /* port is not bridged, so simulate going to FORWARDING state */
Jiri Pirko76c6f942015-09-24 10:02:44 +02003964 return rocker_port_stp_update(rocker_port, trans, flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07003965 BR_STATE_FORWARDING);
Scott Feldmane47172a2015-02-25 20:15:38 -08003966}
3967
Scott Feldmanc4f20322015-05-10 09:47:50 -07003968static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02003969 struct switchdev_trans *trans, int flags)
Scott Feldmane47172a2015-02-25 20:15:38 -08003970{
3971 if (rocker_port_is_bridged(rocker_port))
3972 /* bridge STP will disable port */
3973 return 0;
3974
3975 /* port is not bridged, so simulate going to DISABLED state */
Jiri Pirko76c6f942015-09-24 10:02:44 +02003976 return rocker_port_stp_update(rocker_port, trans, flags,
Scott Feldman179f9a22015-06-12 21:35:46 -07003977 BR_STATE_DISABLED);
Scott Feldmane47172a2015-02-25 20:15:38 -08003978}
3979
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003980static struct rocker_internal_vlan_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09003981rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003982{
3983 struct rocker_internal_vlan_tbl_entry *found;
3984
3985 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3986 entry, ifindex) {
3987 if (found->ifindex == ifindex)
3988 return found;
3989 }
3990
3991 return NULL;
3992}
3993
3994static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3995 int ifindex)
3996{
3997 struct rocker *rocker = rocker_port->rocker;
3998 struct rocker_internal_vlan_tbl_entry *entry;
3999 struct rocker_internal_vlan_tbl_entry *found;
4000 unsigned long lock_flags;
4001 int i;
4002
Simon Hormandf6a2062015-05-21 12:40:17 +09004003 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004004 if (!entry)
4005 return 0;
4006
4007 entry->ifindex = ifindex;
4008
4009 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4010
4011 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4012 if (found) {
Simon Hormandf6a2062015-05-21 12:40:17 +09004013 kfree(entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004014 goto found;
4015 }
4016
4017 found = entry;
4018 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
4019
4020 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
4021 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
4022 continue;
4023 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
4024 goto found;
4025 }
4026
4027 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
4028
4029found:
4030 found->ref_count++;
4031 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4032
4033 return found->vlan_id;
4034}
4035
Simon Hormane5054642015-05-25 14:28:36 +09004036static void
4037rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
4038 int ifindex)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004039{
4040 struct rocker *rocker = rocker_port->rocker;
4041 struct rocker_internal_vlan_tbl_entry *found;
4042 unsigned long lock_flags;
4043 unsigned long bit;
4044
4045 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
4046
4047 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
4048 if (!found) {
4049 netdev_err(rocker_port->dev,
4050 "ifindex (%d) not found in internal VLAN tbl\n",
4051 ifindex);
4052 goto not_found;
4053 }
4054
4055 if (--found->ref_count <= 0) {
4056 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
4057 clear_bit(bit, rocker->internal_vlan_bitmap);
4058 hash_del(&found->entry);
Simon Hormandf6a2062015-05-21 12:40:17 +09004059 kfree(found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004060 }
4061
4062not_found:
4063 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
4064}
4065
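/* IPv4 FIB offload (single nexthop only, see the XXX below): when the
 * route's nexthop gateway sits on this port, the route points at an L3
 * unicast group resolved through rocker_port_ipv4_nh(); otherwise the route
 * traps matching packets to the CPU via the L2 interface group of the
 * internal VLAN with pport 0.
 */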
Scott Feldmanc4f20322015-05-10 09:47:50 -07004066static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004067 struct switchdev_trans *trans, __be32 dst,
Simon Hormane5054642015-05-25 14:28:36 +09004068 int dst_len, const struct fib_info *fi,
4069 u32 tb_id, int flags)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08004070{
Simon Hormane5054642015-05-25 14:28:36 +09004071 const struct fib_nh *nh;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08004072 __be16 eth_type = htons(ETH_P_IP);
4073 __be32 dst_mask = inet_make_mask(dst_len);
4074 __be16 internal_vlan_id = rocker_port->internal_vlan_id;
4075 u32 priority = fi->fib_priority;
4076 enum rocker_of_dpa_table_id goto_tbl =
4077 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
4078 u32 group_id;
4079 bool nh_on_port;
4080 bool has_gw;
4081 u32 index;
4082 int err;
4083
4084 /* XXX support ECMP */
4085
4086 nh = fi->fib_nh;
4087 nh_on_port = (fi->fib_dev == rocker_port->dev);
4088 has_gw = !!nh->nh_gw;
4089
4090 if (has_gw && nh_on_port) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02004091 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08004092 nh->nh_gw, &index);
4093 if (err)
4094 return err;
4095
4096 group_id = ROCKER_GROUP_L3_UNICAST(index);
4097 } else {
4098 /* Send to CPU for processing */
4099 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
4100 }
4101
Jiri Pirko76c6f942015-09-24 10:02:44 +02004102 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08004103 dst_mask, priority, goto_tbl,
4104 group_id, flags);
4105 if (err)
4106 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
4107 err, &dst);
4108
4109 return err;
4110}
4111
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004112/*****************
4113 * Net device ops
4114 *****************/
4115
4116static int rocker_port_open(struct net_device *dev)
4117{
4118 struct rocker_port *rocker_port = netdev_priv(dev);
4119 int err;
4120
4121 err = rocker_port_dma_rings_init(rocker_port);
4122 if (err)
4123 return err;
4124
4125 err = request_irq(rocker_msix_tx_vector(rocker_port),
4126 rocker_tx_irq_handler, 0,
4127 rocker_driver_name, rocker_port);
4128 if (err) {
4129 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
4130 goto err_request_tx_irq;
4131 }
4132
4133 err = request_irq(rocker_msix_rx_vector(rocker_port),
4134 rocker_rx_irq_handler, 0,
4135 rocker_driver_name, rocker_port);
4136 if (err) {
4137 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
4138 goto err_request_rx_irq;
4139 }
4140
Jiri Pirkoe4201142016-02-16 15:14:45 +01004141 err = rocker_world_port_open(rocker_port);
4142 if (err) {
4143 netdev_err(rocker_port->dev, "cannot open port in world\n");
4144 goto err_world_port_open;
4145 }
4146
Jiri Pirko76c6f942015-09-24 10:02:44 +02004147 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01004148 if (err)
Scott Feldmane47172a2015-02-25 20:15:38 -08004149 goto err_fwd_enable;
Scott Feldman6c707942014-11-28 14:34:28 +01004150
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004151 napi_enable(&rocker_port->napi_tx);
4152 napi_enable(&rocker_port->napi_rx);
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004153 if (!dev->proto_down)
4154 rocker_port_set_enable(rocker_port, true);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004155 netif_start_queue(dev);
4156 return 0;
4157
Scott Feldmane47172a2015-02-25 20:15:38 -08004158err_fwd_enable:
Jiri Pirkoe4201142016-02-16 15:14:45 +01004159err_world_port_open:
Scott Feldman6c707942014-11-28 14:34:28 +01004160 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004161err_request_rx_irq:
4162 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4163err_request_tx_irq:
4164 rocker_port_dma_rings_fini(rocker_port);
4165 return err;
4166}
4167
4168static int rocker_port_stop(struct net_device *dev)
4169{
4170 struct rocker_port *rocker_port = netdev_priv(dev);
4171
4172 netif_stop_queue(dev);
4173 rocker_port_set_enable(rocker_port, false);
4174 napi_disable(&rocker_port->napi_rx);
4175 napi_disable(&rocker_port->napi_tx);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004176 rocker_world_port_stop(rocker_port);
Jiri Pirko76c6f942015-09-24 10:02:44 +02004177 rocker_port_fwd_disable(rocker_port, NULL,
Scott Feldmanf66feaa2015-06-12 21:35:50 -07004178 ROCKER_OP_FLAG_NOWAIT);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004179 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4180 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4181 rocker_port_dma_rings_fini(rocker_port);
4182
4183 return 0;
4184}
4185
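/* Walk the ROCKER_TLV_TX_FRAGS nest of a completed tx descriptor and
 * DMA-unmap every fragment that carries both an ADDR and a LEN attribute.
 */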
Simon Hormane5054642015-05-25 14:28:36 +09004186static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4187 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004188{
Simon Hormane5054642015-05-25 14:28:36 +09004189 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004190 struct pci_dev *pdev = rocker->pdev;
Simon Hormane5054642015-05-25 14:28:36 +09004191 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004192 struct rocker_tlv *attr;
4193 int rem;
4194
4195 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4196 if (!attrs[ROCKER_TLV_TX_FRAGS])
4197 return;
4198 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
Simon Hormane5054642015-05-25 14:28:36 +09004199 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004200 dma_addr_t dma_handle;
4201 size_t len;
4202
4203 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4204 continue;
4205 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
4206 attr);
4207 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4208 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4209 continue;
4210 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4211 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4212 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
4213 }
4214}
4215
Simon Hormane5054642015-05-25 14:28:36 +09004216static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004217 struct rocker_desc_info *desc_info,
4218 char *buf, size_t buf_len)
4219{
Simon Hormane5054642015-05-25 14:28:36 +09004220 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004221 struct pci_dev *pdev = rocker->pdev;
4222 dma_addr_t dma_handle;
4223 struct rocker_tlv *frag;
4224
4225 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
4226 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
4227 if (net_ratelimit())
4228 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
4229 return -EIO;
4230 }
4231 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
4232 if (!frag)
4233 goto unmap_frag;
4234 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
4235 dma_handle))
4236 goto nest_cancel;
4237 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
4238 buf_len))
4239 goto nest_cancel;
4240 rocker_tlv_nest_end(desc_info, frag);
4241 return 0;
4242
4243nest_cancel:
4244 rocker_tlv_nest_cancel(desc_info, frag);
4245unmap_frag:
4246 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
4247 return -EMSGSIZE;
4248}
4249
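/* Transmit path.  Each skb is described to the device as nested TLVs on a
 * single tx descriptor, roughly:
 *
 *   ROCKER_TLV_TX_FRAGS
 *     ROCKER_TLV_TX_FRAG                  (linear head)
 *       ROCKER_TLV_TX_FRAG_ATTR_ADDR      (dma address)
 *       ROCKER_TLV_TX_FRAG_ATTR_LEN       (length)
 *     ROCKER_TLV_TX_FRAG                  (one per page fragment)
 *       ...
 *
 * Skbs with more than ROCKER_TX_FRAGS_MAX fragments are linearized before
 * the page fragments are mapped.
 */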
4250static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
4251{
4252 struct rocker_port *rocker_port = netdev_priv(dev);
4253 struct rocker *rocker = rocker_port->rocker;
4254 struct rocker_desc_info *desc_info;
4255 struct rocker_tlv *frags;
4256 int i;
4257 int err;
4258
4259 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4260 if (unlikely(!desc_info)) {
4261 if (net_ratelimit())
4262 netdev_err(dev, "tx ring full when queue awake\n");
4263 return NETDEV_TX_BUSY;
4264 }
4265
4266 rocker_desc_cookie_ptr_set(desc_info, skb);
4267
4268 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
4269 if (!frags)
4270 goto out;
4271 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4272 skb->data, skb_headlen(skb));
4273 if (err)
4274 goto nest_cancel;
Jiri Pirko95b9be62015-08-02 20:56:38 +02004275 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
4276 err = skb_linearize(skb);
4277 if (err)
4278 goto unmap_frags;
4279 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004280
4281 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4282 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4283
4284 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4285 skb_frag_address(frag),
4286 skb_frag_size(frag));
4287 if (err)
4288 goto unmap_frags;
4289 }
4290 rocker_tlv_nest_end(desc_info, frags);
4291
4292 rocker_desc_gen_clear(desc_info);
4293 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
4294
4295 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4296 if (!desc_info)
4297 netif_stop_queue(dev);
4298
4299 return NETDEV_TX_OK;
4300
4301unmap_frags:
4302 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4303nest_cancel:
4304 rocker_tlv_nest_cancel(desc_info, frags);
4305out:
4306 dev_kfree_skb(skb);
David Ahernf2bbca52015-01-16 14:22:29 -07004307 dev->stats.tx_dropped++;
4308
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004309 return NETDEV_TX_OK;
4310}
4311
4312static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4313{
4314 struct sockaddr *addr = p;
4315 struct rocker_port *rocker_port = netdev_priv(dev);
4316 int err;
4317
4318 if (!is_valid_ether_addr(addr->sa_data))
4319 return -EADDRNOTAVAIL;
4320
4321 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4322 if (err)
4323 return err;
4324 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4325 return 0;
4326}
4327
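/* MTU changes outside the 68..9000 range are rejected; valid changes
 * require the port to be quiesced: a running port is stopped, the new MTU
 * is pushed to the device with rocker_cmd_set_port_settings_mtu(), and
 * the port is then reopened.
 */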
Scott Feldman77a58c72015-07-08 16:06:47 -07004328static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4329{
4330 struct rocker_port *rocker_port = netdev_priv(dev);
4331 int running = netif_running(dev);
4332 int err;
4333
4334#define ROCKER_PORT_MIN_MTU 68
4335#define ROCKER_PORT_MAX_MTU 9000
4336
4337 if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4338 return -EINVAL;
4339
4340 if (running)
4341 rocker_port_stop(dev);
4342
4343 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4344 dev->mtu = new_mtu;
4345
4346 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4347 if (err)
4348 return err;
4349
4350 if (running)
4351 err = rocker_port_open(dev);
4352
4353 return err;
4354}
4355
David Aherndb191702015-03-17 20:23:16 -06004356static int rocker_port_get_phys_port_name(struct net_device *dev,
4357 char *buf, size_t len)
4358{
4359 struct rocker_port *rocker_port = netdev_priv(dev);
4360 struct port_name name = { .buf = buf, .len = len };
4361 int err;
4362
Jiri Pirko76c6f942015-09-24 10:02:44 +02004363 err = rocker_cmd_exec(rocker_port, NULL, 0,
David Aherndb191702015-03-17 20:23:16 -06004364 rocker_cmd_get_port_settings_prep, NULL,
4365 rocker_cmd_get_port_settings_phys_name_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07004366 &name);
David Aherndb191702015-03-17 20:23:16 -06004367
4368 return err ? -EOPNOTSUPP : 0;
4369}
4370
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004371static int rocker_port_change_proto_down(struct net_device *dev,
4372 bool proto_down)
4373{
4374 struct rocker_port *rocker_port = netdev_priv(dev);
4375
4376 if (rocker_port->dev->flags & IFF_UP)
4377 rocker_port_set_enable(rocker_port, !proto_down);
4378 rocker_port->dev->proto_down = proto_down;
4379 return 0;
4380}
4381
Scott Feldmandd19f832015-08-12 18:45:25 -07004382static void rocker_port_neigh_destroy(struct neighbour *n)
4383{
4384 struct rocker_port *rocker_port = netdev_priv(n->dev);
4385 int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4386 __be32 ip_addr = *(__be32 *)n->primary_key;
Jiri Pirkoe4201142016-02-16 15:14:45 +01004387 int err;
Scott Feldmandd19f832015-08-12 18:45:25 -07004388
Jiri Pirko76c6f942015-09-24 10:02:44 +02004389 rocker_port_ipv4_neigh(rocker_port, NULL,
Scott Feldmandd19f832015-08-12 18:45:25 -07004390 flags, ip_addr, n->ha);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004391 err = rocker_world_port_neigh_destroy(rocker_port, n);
4392 if (err)
4393 netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
4394 err);
Scott Feldmandd19f832015-08-12 18:45:25 -07004395}
4396
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004397static const struct net_device_ops rocker_port_netdev_ops = {
4398 .ndo_open = rocker_port_open,
4399 .ndo_stop = rocker_port_stop,
4400 .ndo_start_xmit = rocker_port_xmit,
4401 .ndo_set_mac_address = rocker_port_set_mac_address,
Scott Feldman77a58c72015-07-08 16:06:47 -07004402 .ndo_change_mtu = rocker_port_change_mtu,
Scott Feldman85fdb952015-05-10 09:48:05 -07004403 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
Scott Feldmanfc8f40d2015-05-10 09:47:57 -07004404 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
Scott Feldman54ba5a02015-05-10 09:48:01 -07004405 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004406 .ndo_fdb_add = switchdev_port_fdb_add,
4407 .ndo_fdb_del = switchdev_port_fdb_del,
4408 .ndo_fdb_dump = switchdev_port_fdb_dump,
David Aherndb191702015-03-17 20:23:16 -06004409 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004410 .ndo_change_proto_down = rocker_port_change_proto_down,
Scott Feldmandd19f832015-08-12 18:45:25 -07004411 .ndo_neigh_destroy = rocker_port_neigh_destroy,
Scott Feldman98237d42015-03-15 21:07:15 -07004412};
4413
4414/********************
4415 * switchdev interface
4416 ********************/
4417
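/* switchdev attr/obj handlers.  Operations are first applied to the
 * OF-DPA tables managed in this file and, where a world hook exists,
 * mirrored to the world backend via the matching rocker_world_port_*()
 * call; a failure in either step aborts the operation.
 */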
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004418static int rocker_port_attr_get(struct net_device *dev,
4419 struct switchdev_attr *attr)
Scott Feldman98237d42015-03-15 21:07:15 -07004420{
Simon Hormane5054642015-05-25 14:28:36 +09004421 const struct rocker_port *rocker_port = netdev_priv(dev);
4422 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirkoe4201142016-02-16 15:14:45 +01004423 int err = 0;
Scott Feldman98237d42015-03-15 21:07:15 -07004424
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004425 switch (attr->id) {
Jiri Pirko1f868392015-10-01 11:03:42 +02004426 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
Scott Feldman42275bd2015-05-13 11:16:50 -07004427 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4428 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004429 break;
Jiri Pirko1f868392015-10-01 11:03:42 +02004430 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
Scott Feldman42275bd2015-05-13 11:16:50 -07004431 attr->u.brport_flags = rocker_port->brport_flags;
Jiri Pirkoe4201142016-02-16 15:14:45 +01004432 err = rocker_world_port_attr_bridge_flags_get(rocker_port,
4433 &attr->u.brport_flags);
Scott Feldman6004c862015-05-10 09:47:55 -07004434 break;
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004435 default:
4436 return -EOPNOTSUPP;
4437 }
4438
Jiri Pirkoe4201142016-02-16 15:14:45 +01004439 return err;
Scott Feldman98237d42015-03-15 21:07:15 -07004440}
4441
Scott Feldman6004c862015-05-10 09:47:55 -07004442static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004443 struct switchdev_trans *trans,
Scott Feldman6004c862015-05-10 09:47:55 -07004444 unsigned long brport_flags)
4445{
4446 unsigned long orig_flags;
4447 int err = 0;
4448
4449 orig_flags = rocker_port->brport_flags;
4450 rocker_port->brport_flags = brport_flags;
4451 if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01004452 err = rocker_port_set_learning(rocker_port, trans,
4453 !!(rocker_port->brport_flags & BR_LEARNING));
Scott Feldman6004c862015-05-10 09:47:55 -07004454
Jiri Pirko76c6f942015-09-24 10:02:44 +02004455 if (switchdev_trans_ph_prepare(trans))
Scott Feldman6004c862015-05-10 09:47:55 -07004456 rocker_port->brport_flags = orig_flags;
4457
4458 return err;
4459}
4460
Scott Feldmand0cf57f2015-10-08 19:23:20 -07004461static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
4462 struct switchdev_trans *trans,
4463 u32 ageing_time)
4464{
4465 if (!switchdev_trans_ph_prepare(trans)) {
4466 rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
4467 mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
4468 }
4469
4470 return 0;
4471}
4472
Scott Feldmanc4f20322015-05-10 09:47:50 -07004473static int rocker_port_attr_set(struct net_device *dev,
Jiri Pirkof7fadf32015-10-14 19:40:49 +02004474 const struct switchdev_attr *attr,
Jiri Pirko7ea6eb32015-09-24 10:02:41 +02004475 struct switchdev_trans *trans)
Scott Feldmanc4f20322015-05-10 09:47:50 -07004476{
4477 struct rocker_port *rocker_port = netdev_priv(dev);
4478 int err = 0;
4479
Scott Feldmanc4f20322015-05-10 09:47:50 -07004480 switch (attr->id) {
Jiri Pirko1f868392015-10-01 11:03:42 +02004481 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
Jiri Pirkod33eeb62015-10-14 19:40:54 +02004482 err = rocker_port_stp_update(rocker_port, trans, 0,
Scott Feldman42275bd2015-05-13 11:16:50 -07004483 attr->u.stp_state);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004484 if (err)
4485 break;
4486 err = rocker_world_port_attr_stp_state_set(rocker_port,
4487 attr->u.stp_state,
4488 trans);
Scott Feldman35636062015-05-10 09:47:51 -07004489 break;
Jiri Pirko1f868392015-10-01 11:03:42 +02004490 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
Jiri Pirko76c6f942015-09-24 10:02:44 +02004491 err = rocker_port_brport_flags_set(rocker_port, trans,
Scott Feldman42275bd2015-05-13 11:16:50 -07004492 attr->u.brport_flags);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004493 if (err)
4494 break;
4495 err = rocker_world_port_attr_bridge_flags_set(rocker_port,
4496 attr->u.brport_flags,
4497 trans);
Scott Feldman6004c862015-05-10 09:47:55 -07004498 break;
Scott Feldmand0cf57f2015-10-08 19:23:20 -07004499 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
4500 err = rocker_port_bridge_ageing_time(rocker_port, trans,
4501 attr->u.ageing_time);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004502 if (err)
4503 break;
4504 err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
4505 attr->u.ageing_time,
4506 trans);
Scott Feldmand0cf57f2015-10-08 19:23:20 -07004507 break;
Scott Feldmanc4f20322015-05-10 09:47:50 -07004508 default:
4509 err = -EOPNOTSUPP;
4510 break;
4511 }
4512
4513 return err;
Scott Feldman98237d42015-03-15 21:07:15 -07004514}
4515
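/* Adding a VLAN to a port installs the VLAN table entry and a router MAC
 * (termination MAC) entry for that VID; if the second step fails the VLAN
 * entry is rolled back.  PVID/untagged flag handling is still a TODO (see
 * the XXX below).
 */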
Scott Feldman9228ad22015-05-10 09:47:54 -07004516static int rocker_port_vlan_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004517 struct switchdev_trans *trans,
4518 u16 vid, u16 flags)
Scott Feldman9228ad22015-05-10 09:47:54 -07004519{
4520 int err;
4521
4522 /* XXX deal with flags for PVID and untagged */
4523
Jiri Pirko76c6f942015-09-24 10:02:44 +02004524 err = rocker_port_vlan(rocker_port, trans, 0, vid);
Scott Feldman9228ad22015-05-10 09:47:54 -07004525 if (err)
4526 return err;
4527
Jiri Pirko76c6f942015-09-24 10:02:44 +02004528 err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
Scott Feldmancec04a62015-06-01 11:39:03 -07004529 if (err)
Jiri Pirko76c6f942015-09-24 10:02:44 +02004530 rocker_port_vlan(rocker_port, trans,
Scott Feldmancec04a62015-06-01 11:39:03 -07004531 ROCKER_OP_FLAG_REMOVE, vid);
4532
4533 return err;
Scott Feldman9228ad22015-05-10 09:47:54 -07004534}
4535
4536static int rocker_port_vlans_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004537 struct switchdev_trans *trans,
Jiri Pirko8f24f302015-10-01 11:03:43 +02004538 const struct switchdev_obj_port_vlan *vlan)
Scott Feldman9228ad22015-05-10 09:47:54 -07004539{
4540 u16 vid;
4541 int err;
4542
Scott Feldman3e3a78b2015-06-22 00:27:16 -07004543 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02004544 err = rocker_port_vlan_add(rocker_port, trans,
Scott Feldman9228ad22015-05-10 09:47:54 -07004545 vid, vlan->flags);
4546 if (err)
4547 return err;
4548 }
4549
4550 return 0;
4551}
4552
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004553static int rocker_port_fdb_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004554 struct switchdev_trans *trans,
Jiri Pirko52ba57c2015-10-01 11:03:44 +02004555 const struct switchdev_obj_port_fdb *fdb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004556{
4557 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4558 int flags = 0;
4559
4560 if (!rocker_port_is_bridged(rocker_port))
4561 return -EINVAL;
4562
Jiri Pirko76c6f942015-09-24 10:02:44 +02004563 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004564}
4565
Scott Feldman9228ad22015-05-10 09:47:54 -07004566static int rocker_port_obj_add(struct net_device *dev,
Jiri Pirko648b4a92015-10-01 11:03:45 +02004567 const struct switchdev_obj *obj,
Jiri Pirko7ea6eb32015-09-24 10:02:41 +02004568 struct switchdev_trans *trans)
Scott Feldman9228ad22015-05-10 09:47:54 -07004569{
4570 struct rocker_port *rocker_port = netdev_priv(dev);
Simon Hormane5054642015-05-25 14:28:36 +09004571 const struct switchdev_obj_ipv4_fib *fib4;
Scott Feldman9228ad22015-05-10 09:47:54 -07004572 int err = 0;
4573
Jiri Pirko9e8f4a52015-10-01 11:03:46 +02004574 switch (obj->id) {
Jiri Pirko57d80832015-10-01 11:03:41 +02004575 case SWITCHDEV_OBJ_ID_PORT_VLAN:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004576 err = rocker_port_vlans_add(rocker_port, trans,
4577 SWITCHDEV_OBJ_PORT_VLAN(obj));
Jiri Pirkoe4201142016-02-16 15:14:45 +01004578 if (err)
4579 break;
4580 err = rocker_world_port_obj_vlan_add(rocker_port,
4581 SWITCHDEV_OBJ_PORT_VLAN(obj),
4582 trans);
Scott Feldman9228ad22015-05-10 09:47:54 -07004583 break;
Jiri Pirko57d80832015-10-01 11:03:41 +02004584 case SWITCHDEV_OBJ_ID_IPV4_FIB:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004585 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
Jiri Pirko76c6f942015-09-24 10:02:44 +02004586 err = rocker_port_fib_ipv4(rocker_port, trans,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004587 htonl(fib4->dst), fib4->dst_len,
Jiri Pirko850d0cb2015-10-14 19:40:51 +02004588 &fib4->fi, fib4->tb_id, 0);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004589 if (err)
4590 break;
4591 err = rocker_world_port_obj_fib4_add(rocker_port,
4592 SWITCHDEV_OBJ_IPV4_FIB(obj),
4593 trans);
Scott Feldman58c2cb12015-05-10 09:48:06 -07004594 break;
Jiri Pirko57d80832015-10-01 11:03:41 +02004595 case SWITCHDEV_OBJ_ID_PORT_FDB:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004596 err = rocker_port_fdb_add(rocker_port, trans,
4597 SWITCHDEV_OBJ_PORT_FDB(obj));
Jiri Pirkoe4201142016-02-16 15:14:45 +01004598 if (err)
4599 break;
4600 err = rocker_world_port_obj_fdb_add(rocker_port,
4601 SWITCHDEV_OBJ_PORT_FDB(obj),
4602 trans);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004603 break;
Scott Feldman9228ad22015-05-10 09:47:54 -07004604 default:
4605 err = -EOPNOTSUPP;
4606 break;
4607 }
4608
4609 return err;
4610}
4611
4612static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4613 u16 vid, u16 flags)
4614{
4615 int err;
4616
Jiri Pirko76c6f942015-09-24 10:02:44 +02004617 err = rocker_port_router_mac(rocker_port, NULL,
Scott Feldman9228ad22015-05-10 09:47:54 -07004618 ROCKER_OP_FLAG_REMOVE, htons(vid));
4619 if (err)
4620 return err;
4621
Jiri Pirko76c6f942015-09-24 10:02:44 +02004622 return rocker_port_vlan(rocker_port, NULL,
Scott Feldman9228ad22015-05-10 09:47:54 -07004623 ROCKER_OP_FLAG_REMOVE, vid);
4624}
4625
4626static int rocker_port_vlans_del(struct rocker_port *rocker_port,
Jiri Pirko8f24f302015-10-01 11:03:43 +02004627 const struct switchdev_obj_port_vlan *vlan)
Scott Feldman9228ad22015-05-10 09:47:54 -07004628{
4629 u16 vid;
4630 int err;
4631
Scott Feldman3e3a78b2015-06-22 00:27:16 -07004632 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
Scott Feldman9228ad22015-05-10 09:47:54 -07004633 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4634 if (err)
4635 return err;
4636 }
4637
4638 return 0;
4639}
4640
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004641static int rocker_port_fdb_del(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004642 struct switchdev_trans *trans,
Jiri Pirko52ba57c2015-10-01 11:03:44 +02004643 const struct switchdev_obj_port_fdb *fdb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004644{
4645 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
Jiri Pirkod33eeb62015-10-14 19:40:54 +02004646 int flags = ROCKER_OP_FLAG_REMOVE;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004647
4648 if (!rocker_port_is_bridged(rocker_port))
4649 return -EINVAL;
4650
Jiri Pirko76c6f942015-09-24 10:02:44 +02004651 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004652}
4653
Scott Feldman9228ad22015-05-10 09:47:54 -07004654static int rocker_port_obj_del(struct net_device *dev,
Jiri Pirko648b4a92015-10-01 11:03:45 +02004655 const struct switchdev_obj *obj)
Scott Feldman9228ad22015-05-10 09:47:54 -07004656{
4657 struct rocker_port *rocker_port = netdev_priv(dev);
Simon Hormane5054642015-05-25 14:28:36 +09004658 const struct switchdev_obj_ipv4_fib *fib4;
Scott Feldman9228ad22015-05-10 09:47:54 -07004659 int err = 0;
4660
Jiri Pirko9e8f4a52015-10-01 11:03:46 +02004661 switch (obj->id) {
Jiri Pirko57d80832015-10-01 11:03:41 +02004662 case SWITCHDEV_OBJ_ID_PORT_VLAN:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004663 err = rocker_port_vlans_del(rocker_port,
4664 SWITCHDEV_OBJ_PORT_VLAN(obj));
Jiri Pirkoe4201142016-02-16 15:14:45 +01004665 if (err)
4666 break;
4667 err = rocker_world_port_obj_vlan_del(rocker_port,
4668 SWITCHDEV_OBJ_PORT_VLAN(obj));
Scott Feldman9228ad22015-05-10 09:47:54 -07004669 break;
Jiri Pirko57d80832015-10-01 11:03:41 +02004670 case SWITCHDEV_OBJ_ID_IPV4_FIB:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004671 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
Jiri Pirko76c6f942015-09-24 10:02:44 +02004672 err = rocker_port_fib_ipv4(rocker_port, NULL,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004673 htonl(fib4->dst), fib4->dst_len,
Jiri Pirko850d0cb2015-10-14 19:40:51 +02004674 &fib4->fi, fib4->tb_id,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004675 ROCKER_OP_FLAG_REMOVE);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004676 if (err)
4677 break;
4678 err = rocker_world_port_obj_fib4_del(rocker_port,
4679 SWITCHDEV_OBJ_IPV4_FIB(obj));
Scott Feldman58c2cb12015-05-10 09:48:06 -07004680 break;
Jiri Pirko57d80832015-10-01 11:03:41 +02004681 case SWITCHDEV_OBJ_ID_PORT_FDB:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004682 err = rocker_port_fdb_del(rocker_port, NULL,
4683 SWITCHDEV_OBJ_PORT_FDB(obj));
Jiri Pirkoe4201142016-02-16 15:14:45 +01004684 if (err)
4685 break;
4686 err = rocker_world_port_obj_fdb_del(rocker_port,
4687 SWITCHDEV_OBJ_PORT_FDB(obj));
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004688 break;
4689 default:
4690 err = -EOPNOTSUPP;
4691 break;
4692 }
4693
4694 return err;
4695}
4696
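/* Dump the software FDB: walk rocker->fdb_tbl under fdb_tbl_lock, skip
 * entries learned on other ports and report each remaining entry to the
 * switchdev callback as NUD_REACHABLE.
 */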
Simon Hormane5054642015-05-25 14:28:36 +09004697static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
Jiri Pirko52ba57c2015-10-01 11:03:44 +02004698 struct switchdev_obj_port_fdb *fdb,
Jiri Pirko648b4a92015-10-01 11:03:45 +02004699 switchdev_obj_dump_cb_t *cb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004700{
4701 struct rocker *rocker = rocker_port->rocker;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004702 struct rocker_fdb_tbl_entry *found;
4703 struct hlist_node *tmp;
4704 unsigned long lock_flags;
4705 int bkt;
4706 int err = 0;
4707
4708 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4709 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4c660492015-09-23 08:39:15 -07004710 if (found->key.rocker_port != rocker_port)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004711 continue;
Jiri Pirko850d0cb2015-10-14 19:40:51 +02004712 ether_addr_copy(fdb->addr, found->key.addr);
Vivien Didelotce80e7b2015-08-10 09:09:52 -04004713 fdb->ndm_state = NUD_REACHABLE;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004714 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4715 found->key.vlan_id);
Jiri Pirko648b4a92015-10-01 11:03:45 +02004716 err = cb(&fdb->obj);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004717 if (err)
4718 break;
4719 }
4720 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4721
4722 return err;
4723}
4724
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004725static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
Jiri Pirko8f24f302015-10-01 11:03:43 +02004726 struct switchdev_obj_port_vlan *vlan,
Jiri Pirko648b4a92015-10-01 11:03:45 +02004727 switchdev_obj_dump_cb_t *cb)
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004728{
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004729 u16 vid;
4730 int err = 0;
4731
4732 for (vid = 1; vid < VLAN_N_VID; vid++) {
4733 if (!test_bit(vid, rocker_port->vlan_bitmap))
4734 continue;
4735 vlan->flags = 0;
4736 if (rocker_vlan_id_is_internal(htons(vid)))
4737 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01004738 vlan->vid_begin = vid;
4739 vlan->vid_end = vid;
Jiri Pirko648b4a92015-10-01 11:03:45 +02004740 err = cb(&vlan->obj);
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004741 if (err)
4742 break;
4743 }
4744
4745 return err;
4746}
4747
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004748static int rocker_port_obj_dump(struct net_device *dev,
Jiri Pirko648b4a92015-10-01 11:03:45 +02004749 struct switchdev_obj *obj,
4750 switchdev_obj_dump_cb_t *cb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004751{
Simon Hormane5054642015-05-25 14:28:36 +09004752 const struct rocker_port *rocker_port = netdev_priv(dev);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004753 int err = 0;
4754
Jiri Pirko9e8f4a52015-10-01 11:03:46 +02004755 switch (obj->id) {
Jiri Pirko57d80832015-10-01 11:03:41 +02004756 case SWITCHDEV_OBJ_ID_PORT_FDB:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004757 err = rocker_port_fdb_dump(rocker_port,
4758 SWITCHDEV_OBJ_PORT_FDB(obj), cb);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004759 if (err)
4760 break;
4761 err = rocker_world_port_obj_fdb_dump(rocker_port,
4762 SWITCHDEV_OBJ_PORT_FDB(obj),
4763 cb);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004764 break;
Jiri Pirko57d80832015-10-01 11:03:41 +02004765 case SWITCHDEV_OBJ_ID_PORT_VLAN:
Jiri Pirko648b4a92015-10-01 11:03:45 +02004766 err = rocker_port_vlan_dump(rocker_port,
4767 SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
Jiri Pirkoe4201142016-02-16 15:14:45 +01004768 if (err)
4769 break;
4770 err = rocker_world_port_obj_vlan_dump(rocker_port,
4771 SWITCHDEV_OBJ_PORT_VLAN(obj),
4772 cb);
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004773 break;
Scott Feldman9228ad22015-05-10 09:47:54 -07004774 default:
4775 err = -EOPNOTSUPP;
4776 break;
4777 }
4778
4779 return err;
4780}
4781
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07004782static const struct switchdev_ops rocker_port_switchdev_ops = {
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004783 .switchdev_port_attr_get = rocker_port_attr_get,
Scott Feldman35636062015-05-10 09:47:51 -07004784 .switchdev_port_attr_set = rocker_port_attr_set,
Scott Feldman9228ad22015-05-10 09:47:54 -07004785 .switchdev_port_obj_add = rocker_port_obj_add,
4786 .switchdev_port_obj_del = rocker_port_obj_del,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004787 .switchdev_port_obj_dump = rocker_port_obj_dump,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004788};
4789
4790/********************
4791 * ethtool interface
4792 ********************/
4793
4794static int rocker_port_get_settings(struct net_device *dev,
4795 struct ethtool_cmd *ecmd)
4796{
4797 struct rocker_port *rocker_port = netdev_priv(dev);
4798
4799 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4800}
4801
4802static int rocker_port_set_settings(struct net_device *dev,
4803 struct ethtool_cmd *ecmd)
4804{
4805 struct rocker_port *rocker_port = netdev_priv(dev);
4806
4807 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4808}
4809
4810static void rocker_port_get_drvinfo(struct net_device *dev,
4811 struct ethtool_drvinfo *drvinfo)
4812{
4813 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4814 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4815}
4816
David Ahern9766e972015-01-29 20:59:33 -07004817static struct rocker_port_stats {
4818 char str[ETH_GSTRING_LEN];
4819 int type;
4820} rocker_port_stats[] = {
4821 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4822 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4823 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4824 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4825
4826 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4827 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4828 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4829 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
4830};
4831
4832#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
4833
4834static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4835 u8 *data)
4836{
4837 u8 *p = data;
4838 int i;
4839
4840 switch (stringset) {
4841 case ETH_SS_STATS:
4842 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4843 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4844 p += ETH_GSTRING_LEN;
4845 }
4846 break;
4847 }
4848}
4849
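/* Port statistics travel over the command ring: the prep callback builds
 * a GET_PORT_STATS request keyed by pport, and the proc callback copies
 * each stat TLV from the reply into the ethtool data array in
 * rocker_port_stats[] order.
 */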
4850static int
Simon Horman534ba6a2015-06-01 13:25:04 +09004851rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
David Ahern9766e972015-01-29 20:59:33 -07004852 struct rocker_desc_info *desc_info,
4853 void *priv)
4854{
4855 struct rocker_tlv *cmd_stats;
4856
4857 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4858 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4859 return -EMSGSIZE;
4860
4861 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4862 if (!cmd_stats)
4863 return -EMSGSIZE;
4864
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004865 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4866 rocker_port->pport))
David Ahern9766e972015-01-29 20:59:33 -07004867 return -EMSGSIZE;
4868
4869 rocker_tlv_nest_end(desc_info, cmd_stats);
4870
4871 return 0;
4872}
4873
4874static int
Simon Horman534ba6a2015-06-01 13:25:04 +09004875rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09004876 const struct rocker_desc_info *desc_info,
David Ahern9766e972015-01-29 20:59:33 -07004877 void *priv)
4878{
Simon Hormane5054642015-05-25 14:28:36 +09004879 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4880 const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4881 const struct rocker_tlv *pattr;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004882 u32 pport;
David Ahern9766e972015-01-29 20:59:33 -07004883 u64 *data = priv;
4884 int i;
4885
4886 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4887
4888 if (!attrs[ROCKER_TLV_CMD_INFO])
4889 return -EIO;
4890
4891 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4892 attrs[ROCKER_TLV_CMD_INFO]);
4893
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004894 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
David Ahern9766e972015-01-29 20:59:33 -07004895 return -EIO;
4896
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004897 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4898 if (pport != rocker_port->pport)
David Ahern9766e972015-01-29 20:59:33 -07004899 return -EIO;
4900
4901 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4902 pattr = stats_attrs[rocker_port_stats[i].type];
4903 if (!pattr)
4904 continue;
4905
4906 data[i] = rocker_tlv_get_u64(pattr);
4907 }
4908
4909 return 0;
4910}
4911
4912static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4913 void *priv)
4914{
Jiri Pirko76c6f942015-09-24 10:02:44 +02004915 return rocker_cmd_exec(rocker_port, NULL, 0,
David Ahern9766e972015-01-29 20:59:33 -07004916 rocker_cmd_get_port_stats_prep, NULL,
4917 rocker_cmd_get_port_stats_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07004918 priv);
David Ahern9766e972015-01-29 20:59:33 -07004919}
4920
4921static void rocker_port_get_stats(struct net_device *dev,
4922 struct ethtool_stats *stats, u64 *data)
4923{
4924 struct rocker_port *rocker_port = netdev_priv(dev);
4925
4926 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4927 int i;
4928
4929 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4930 data[i] = 0;
4931 }
David Ahern9766e972015-01-29 20:59:33 -07004932}
4933
4934static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4935{
4936 switch (sset) {
4937 case ETH_SS_STATS:
4938 return ROCKER_PORT_STATS_LEN;
4939 default:
4940 return -EOPNOTSUPP;
4941 }
4942}
4943
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004944static const struct ethtool_ops rocker_port_ethtool_ops = {
4945 .get_settings = rocker_port_get_settings,
4946 .set_settings = rocker_port_set_settings,
4947 .get_drvinfo = rocker_port_get_drvinfo,
4948 .get_link = ethtool_op_get_link,
David Ahern9766e972015-01-29 20:59:33 -07004949 .get_strings = rocker_port_get_strings,
4950 .get_ethtool_stats = rocker_port_get_stats,
4951 .get_sset_count = rocker_port_get_sset_count,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004952};
4953
4954/*****************
4955 * NAPI interface
4956 *****************/
4957
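/* NAPI tx poll reclaims completed tx descriptors: each fragment is
 * unmapped, the per-port tx counters are updated, the skb is freed and
 * the descriptor credit is handed back to the device; the queue is woken
 * if it was stopped.
 */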
4958static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4959{
4960 return container_of(napi, struct rocker_port, napi_tx);
4961}
4962
4963static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4964{
4965 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
Simon Hormane5054642015-05-25 14:28:36 +09004966 const struct rocker *rocker = rocker_port->rocker;
4967 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004968 u32 credits = 0;
4969 int err;
4970
4971 /* Cleanup tx descriptors */
4972 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
David Ahernf2bbca52015-01-16 14:22:29 -07004973 struct sk_buff *skb;
4974
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004975 err = rocker_desc_err(desc_info);
4976 if (err && net_ratelimit())
4977 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4978 err);
4979 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
David Ahernf2bbca52015-01-16 14:22:29 -07004980
4981 skb = rocker_desc_cookie_ptr_get(desc_info);
4982 if (err == 0) {
4983 rocker_port->dev->stats.tx_packets++;
4984 rocker_port->dev->stats.tx_bytes += skb->len;
Scott Feldman4725ceb2015-05-10 09:48:08 -07004985 } else {
David Ahernf2bbca52015-01-16 14:22:29 -07004986 rocker_port->dev->stats.tx_errors++;
Scott Feldman4725ceb2015-05-10 09:48:08 -07004987 }
David Ahernf2bbca52015-01-16 14:22:29 -07004988
4989 dev_kfree_skb_any(skb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004990 credits++;
4991 }
4992
4993 if (credits && netif_queue_stopped(rocker_port->dev))
4994 netif_wake_queue(rocker_port->dev);
4995
4996 napi_complete(napi);
4997 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4998
4999 return 0;
5000}
5001
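/* Receive path: parse the rx descriptor TLVs, unmap the skb, size it to
 * ROCKER_TLV_RX_FRAG_LEN, set the offload forwarding mark when the device
 * reported ROCKER_RX_FLAGS_FWD_OFFLOAD, hand the skb to the stack and
 * refill the ring slot with a fresh skb.
 */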
Simon Hormane5054642015-05-25 14:28:36 +09005002static int rocker_port_rx_proc(const struct rocker *rocker,
5003 const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005004 struct rocker_desc_info *desc_info)
5005{
Simon Hormane5054642015-05-25 14:28:36 +09005006 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005007 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
5008 size_t rx_len;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005009 u16 rx_flags = 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005010
5011 if (!skb)
5012 return -ENOENT;
5013
5014 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
5015 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
5016 return -EINVAL;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005017 if (attrs[ROCKER_TLV_RX_FLAGS])
5018 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005019
5020 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
5021
5022 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
5023 skb_put(skb, rx_len);
5024 skb->protocol = eth_type_trans(skb, rocker_port->dev);
David Ahernf2bbca52015-01-16 14:22:29 -07005025
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005026 if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
5027 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
5028
David Ahernf2bbca52015-01-16 14:22:29 -07005029 rocker_port->dev->stats.rx_packets++;
5030 rocker_port->dev->stats.rx_bytes += skb->len;
5031
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005032 netif_receive_skb(skb);
5033
Simon Horman534ba6a2015-06-01 13:25:04 +09005034 return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005035}
5036
5037static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
5038{
5039 return container_of(napi, struct rocker_port, napi_rx);
5040}
5041
5042static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
5043{
5044 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
Simon Hormane5054642015-05-25 14:28:36 +09005045 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005046 struct rocker_desc_info *desc_info;
5047 u32 credits = 0;
5048 int err;
5049
5050 /* Process rx descriptors */
5051 while (credits < budget &&
5052 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
5053 err = rocker_desc_err(desc_info);
5054 if (err) {
5055 if (net_ratelimit())
5056 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
5057 err);
5058 } else {
5059 err = rocker_port_rx_proc(rocker, rocker_port,
5060 desc_info);
5061 if (err && net_ratelimit())
5062 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
5063 err);
5064 }
David Ahernf2bbca52015-01-16 14:22:29 -07005065 if (err)
5066 rocker_port->dev->stats.rx_errors++;
5067
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005068 rocker_desc_gen_clear(desc_info);
5069 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
5070 credits++;
5071 }
5072
5073 if (credits < budget)
5074 napi_complete(napi);
5075
5076 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
5077
5078 return credits;
5079}
5080
5081/*****************
5082 * PCI driver ops
5083 *****************/
5084
Simon Hormane5054642015-05-25 14:28:36 +09005085static void rocker_carrier_init(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005086{
Simon Hormane5054642015-05-25 14:28:36 +09005087 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005088 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
5089 bool link_up;
5090
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08005091 link_up = link_status & (1 << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005092 if (link_up)
5093 netif_carrier_on(rocker_port->dev);
5094 else
5095 netif_carrier_off(rocker_port->dev);
5096}
5097
Jiri Pirkoe4201142016-02-16 15:14:45 +01005098static void rocker_remove_ports(struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005099{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005100 struct rocker_port *rocker_port;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005101 int i;
5102
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005103 for (i = 0; i < rocker->port_count; i++) {
5104 rocker_port = rocker->ports[i];
Scott Feldmana0720312015-06-12 21:09:44 -07005105 if (!rocker_port)
5106 continue;
Jiri Pirko76c6f942015-09-24 10:02:44 +02005107 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
Jiri Pirkoe4201142016-02-16 15:14:45 +01005108 rocker_world_port_fini(rocker_port);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005109 unregister_netdev(rocker_port->dev);
Jiri Pirkoe4201142016-02-16 15:14:45 +01005110 rocker_world_port_post_fini(rocker_port);
Ido Schimmel1ebd47e2015-08-02 19:29:16 +02005111 free_netdev(rocker_port->dev);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005112 }
Jiri Pirkoe4201142016-02-16 15:14:45 +01005113 rocker_world_fini(rocker);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005114 kfree(rocker->ports);
5115}
5116
Simon Horman534ba6a2015-06-01 13:25:04 +09005117static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005118{
Simon Horman534ba6a2015-06-01 13:25:04 +09005119 const struct rocker *rocker = rocker_port->rocker;
Simon Hormane5054642015-05-25 14:28:36 +09005120 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005121 int err;
5122
5123 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
5124 rocker_port->dev->dev_addr);
5125 if (err) {
5126 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
5127 eth_hw_addr_random(rocker_port->dev);
5128 }
5129}
5130
5131static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
5132{
Simon Hormane5054642015-05-25 14:28:36 +09005133 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005134 struct rocker_port *rocker_port;
5135 struct net_device *dev;
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005136 u16 untagged_vid = 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005137 int err;
5138
5139 dev = alloc_etherdev(sizeof(struct rocker_port));
5140 if (!dev)
5141 return -ENOMEM;
5142 rocker_port = netdev_priv(dev);
5143 rocker_port->dev = dev;
5144 rocker_port->rocker = rocker;
5145 rocker_port->port_number = port_number;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08005146 rocker_port->pport = port_number + 1;
Scott Feldman5111f802014-11-28 14:34:30 +01005147 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
Scott Feldmane7335702015-09-23 08:39:17 -07005148 rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005149
Jiri Pirkoe4201142016-02-16 15:14:45 +01005150 err = rocker_world_check_init(rocker_port);
5151 if (err) {
5152 dev_err(&pdev->dev, "world init failed\n");
5153 goto err_world_check_init;
5154 }
5155
Simon Horman534ba6a2015-06-01 13:25:04 +09005156 rocker_port_dev_addr_init(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005157 dev->netdev_ops = &rocker_port_netdev_ops;
5158 dev->ethtool_ops = &rocker_port_ethtool_ops;
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07005159 dev->switchdev_ops = &rocker_port_switchdev_ops;
Eric Dumazetd64b5e82015-11-18 06:31:00 -08005160 netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
Jiri Pirko11ce2ba2016-02-16 15:14:41 +01005161 NAPI_POLL_WEIGHT);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005162 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
5163 NAPI_POLL_WEIGHT);
5164 rocker_carrier_init(rocker_port);
5165
Ido Schimmel21518a62015-08-02 20:56:37 +02005166 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005167
Jiri Pirkoe4201142016-02-16 15:14:45 +01005168 err = rocker_world_port_pre_init(rocker_port);
5169 if (err) {
5170 dev_err(&pdev->dev, "port world pre-init failed\n");
5171 goto err_world_port_pre_init;
5172 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005173 err = register_netdev(dev);
5174 if (err) {
5175 dev_err(&pdev->dev, "register_netdev failed\n");
5176 goto err_register_netdev;
5177 }
5178 rocker->ports[port_number] = rocker_port;
5179
Jiri Pirkoe4201142016-02-16 15:14:45 +01005180 err = rocker_world_port_init(rocker_port);
5181 if (err) {
5182 dev_err(&pdev->dev, "port world init failed\n");
5183 goto err_world_port_init;
5184 }
5185
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005186 switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
5187
Jiri Pirkoc1fe9222016-02-16 15:14:46 +01005188 rocker_port_set_learning(rocker_port, NULL,
5189 !!(rocker_port->brport_flags & BR_LEARNING));
Scott Feldman5111f802014-11-28 14:34:30 +01005190
Jiri Pirko76c6f942015-09-24 10:02:44 +02005191 err = rocker_port_ig_tbl(rocker_port, NULL, 0);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005192 if (err) {
Scott Feldmanff147022015-08-03 22:31:18 -07005193 netdev_err(rocker_port->dev, "install ig port table failed\n");
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005194 goto err_port_ig_tbl;
5195 }
5196
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005197 rocker_port->internal_vlan_id =
5198 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
5199
Jiri Pirko76c6f942015-09-24 10:02:44 +02005200 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005201 if (err) {
5202 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
5203 goto err_untagged_vlan;
5204 }
5205
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005206 return 0;
5207
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005208err_untagged_vlan:
Jiri Pirko76c6f942015-09-24 10:02:44 +02005209 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005210err_port_ig_tbl:
Jiri Pirkoe4201142016-02-16 15:14:45 +01005211 rocker_world_port_fini(rocker_port);
5212err_world_port_init:
Scott Feldman6c4f7782015-08-03 22:31:17 -07005213 rocker->ports[port_number] = NULL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005214 unregister_netdev(dev);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005215err_register_netdev:
Jiri Pirkoe4201142016-02-16 15:14:45 +01005216 rocker_world_port_post_fini(rocker_port);
5217err_world_port_pre_init:
5218err_world_check_init:
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005219 free_netdev(dev);
5220 return err;
5221}
5222
5223static int rocker_probe_ports(struct rocker *rocker)
5224{
5225 int i;
5226 size_t alloc_size;
5227 int err;
5228
5229 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
Scott Feldman27b808c2015-06-01 11:39:02 -07005230 rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
Dan Carpentere65ad3b2015-02-25 16:35:32 +03005231 if (!rocker->ports)
5232 return -ENOMEM;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005233 for (i = 0; i < rocker->port_count; i++) {
5234 err = rocker_probe_port(rocker, i);
5235 if (err)
5236 goto remove_ports;
5237 }
5238 return 0;
5239
5240remove_ports:
5241 rocker_remove_ports(rocker);
5242 return err;
5243}
5244
5245static int rocker_msix_init(struct rocker *rocker)
5246{
5247 struct pci_dev *pdev = rocker->pdev;
5248 int msix_entries;
5249 int i;
5250 int err;
5251
5252 msix_entries = pci_msix_vec_count(pdev);
5253 if (msix_entries < 0)
5254 return msix_entries;
5255
5256 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
5257 return -EINVAL;
5258
5259 rocker->msix_entries = kmalloc_array(msix_entries,
5260 sizeof(struct msix_entry),
5261 GFP_KERNEL);
5262 if (!rocker->msix_entries)
5263 return -ENOMEM;
5264
5265 for (i = 0; i < msix_entries; i++)
5266 rocker->msix_entries[i].entry = i;
5267
5268 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
5269 if (err < 0)
5270 goto err_enable_msix;
5271
5272 return 0;
5273
5274err_enable_msix:
5275 kfree(rocker->msix_entries);
5276 return err;
5277}
5278
Simon Hormane5054642015-05-25 14:28:36 +09005279static void rocker_msix_fini(const struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005280{
5281 pci_disable_msix(rocker->pdev);
5282 kfree(rocker->msix_entries);
5283}
5284
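/* PCI probe order: enable the device, map BAR0, pick a 64-bit DMA mask
 * with a 32-bit fallback, set up MSI-X, run the basic hardware test,
 * reset the device, initialize the DMA rings and cmd/event interrupts,
 * initialize the OF-DPA tables and the FDB cleanup timer, and finally
 * create one netdev per physical port.  The error labels unwind in
 * reverse.
 */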
5285static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5286{
5287 struct rocker *rocker;
5288 int err;
5289
5290 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
5291 if (!rocker)
5292 return -ENOMEM;
5293
5294 err = pci_enable_device(pdev);
5295 if (err) {
5296 dev_err(&pdev->dev, "pci_enable_device failed\n");
5297 goto err_pci_enable_device;
5298 }
5299
5300 err = pci_request_regions(pdev, rocker_driver_name);
5301 if (err) {
5302 dev_err(&pdev->dev, "pci_request_regions failed\n");
5303 goto err_pci_request_regions;
5304 }
5305
5306 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5307 if (!err) {
5308 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5309 if (err) {
5310 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
5311 goto err_pci_set_dma_mask;
5312 }
5313 } else {
5314 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5315 if (err) {
5316 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
5317 goto err_pci_set_dma_mask;
5318 }
5319 }
5320
5321 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
5322 dev_err(&pdev->dev, "invalid PCI region size\n");
Wei Yongjun3122a922015-04-16 20:21:02 +08005323 err = -EINVAL;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005324 goto err_pci_resource_len_check;
5325 }
5326
5327 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
5328 pci_resource_len(pdev, 0));
5329 if (!rocker->hw_addr) {
5330 dev_err(&pdev->dev, "ioremap failed\n");
5331 err = -EIO;
5332 goto err_ioremap;
5333 }
5334 pci_set_master(pdev);
5335
5336 rocker->pdev = pdev;
5337 pci_set_drvdata(pdev, rocker);
5338
5339 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
5340
5341 err = rocker_msix_init(rocker);
5342 if (err) {
5343 dev_err(&pdev->dev, "MSI-X init failed\n");
5344 goto err_msix_init;
5345 }
5346
5347 err = rocker_basic_hw_test(rocker);
5348 if (err) {
5349 dev_err(&pdev->dev, "basic hw test failed\n");
5350 goto err_basic_hw_test;
5351 }
5352
5353 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5354
5355 err = rocker_dma_rings_init(rocker);
5356 if (err)
5357 goto err_dma_rings_init;
5358
5359 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
5360 rocker_cmd_irq_handler, 0,
5361 rocker_driver_name, rocker);
5362 if (err) {
5363 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5364 goto err_request_cmd_irq;
5365 }
5366
5367 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5368 rocker_event_irq_handler, 0,
5369 rocker_driver_name, rocker);
5370 if (err) {
5371 dev_err(&pdev->dev, "cannot assign event irq\n");
5372 goto err_request_event_irq;
5373 }
5374
5375 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5376
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005377 err = rocker_init_tbls(rocker);
5378 if (err) {
5379 dev_err(&pdev->dev, "cannot init rocker tables\n");
5380 goto err_init_tbls;
5381 }
5382
Scott Feldman52fe3e22015-09-23 08:39:18 -07005383 setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
5384 (unsigned long) rocker);
5385 mod_timer(&rocker->fdb_cleanup_timer, jiffies);
5386
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005387 err = rocker_probe_ports(rocker);
5388 if (err) {
5389 dev_err(&pdev->dev, "failed to probe ports\n");
5390 goto err_probe_ports;
5391 }
5392
Scott Feldmanc8beb5b2015-08-12 18:44:13 -07005393 dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5394 (int)sizeof(rocker->hw.id), &rocker->hw.id);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005395
5396 return 0;
5397
5398err_probe_ports:
Scott Feldman52fe3e22015-09-23 08:39:18 -07005399 del_timer_sync(&rocker->fdb_cleanup_timer);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005400 rocker_free_tbls(rocker);
5401err_init_tbls:
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005402 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5403err_request_event_irq:
5404 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5405err_request_cmd_irq:
5406 rocker_dma_rings_fini(rocker);
5407err_dma_rings_init:
5408err_basic_hw_test:
5409 rocker_msix_fini(rocker);
5410err_msix_init:
5411 iounmap(rocker->hw_addr);
5412err_ioremap:
5413err_pci_resource_len_check:
5414err_pci_set_dma_mask:
5415 pci_release_regions(pdev);
5416err_pci_request_regions:
5417 pci_disable_device(pdev);
5418err_pci_enable_device:
5419 kfree(rocker);
5420 return err;
5421}
5422
5423static void rocker_remove(struct pci_dev *pdev)
5424{
5425 struct rocker *rocker = pci_get_drvdata(pdev);
5426
Scott Feldman52fe3e22015-09-23 08:39:18 -07005427 del_timer_sync(&rocker->fdb_cleanup_timer);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005428 rocker_free_tbls(rocker);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005429 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5430 rocker_remove_ports(rocker);
5431 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5432 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5433 rocker_dma_rings_fini(rocker);
5434 rocker_msix_fini(rocker);
5435 iounmap(rocker->hw_addr);
5436 pci_release_regions(rocker->pdev);
5437 pci_disable_device(rocker->pdev);
5438 kfree(rocker);
5439}
5440
5441static struct pci_driver rocker_pci_driver = {
5442 .name = rocker_driver_name,
5443 .id_table = rocker_pci_id_table,
5444 .probe = rocker_probe,
5445 .remove = rocker_remove,
5446};
5447
Scott Feldman6c707942014-11-28 14:34:28 +01005448/************************************
5449 * Net device notifier event handler
5450 ************************************/
5451
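/* Netdevice notifier: NETDEV_CHANGEUPPER on a rocker port maps linking to
 * a master onto a bridge join or an OVS master change, and unlinking onto
 * the matching leave; the linking case is also reflected into the world
 * backend via rocker_world_port_master_linked().
 */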
Simon Hormane5054642015-05-25 14:28:36 +09005452static bool rocker_port_dev_check(const struct net_device *dev)
Scott Feldman6c707942014-11-28 14:34:28 +01005453{
5454 return dev->netdev_ops == &rocker_port_netdev_ops;
5455}
5456
5457static int rocker_port_bridge_join(struct rocker_port *rocker_port,
5458 struct net_device *bridge)
5459{
Scott Feldman027e00d2015-06-01 11:39:05 -07005460 u16 untagged_vid = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01005461 int err;
5462
Scott Feldman027e00d2015-06-01 11:39:05 -07005463 /* Port is joining bridge, so the internal VLAN for the
5464 * port is going to change to the bridge internal VLAN.
5465 * Let's remove untagged VLAN (vid=0) from port and
5466 * re-add once internal VLAN has changed.
5467 */
5468
5469 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5470 if (err)
5471 return err;
5472
Simon Hormandf6a2062015-05-21 12:40:17 +09005473 rocker_port_internal_vlan_id_put(rocker_port,
Scott Feldman6c707942014-11-28 14:34:28 +01005474 rocker_port->dev->ifindex);
Scott Feldman027e00d2015-06-01 11:39:05 -07005475 rocker_port->internal_vlan_id =
5476 rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
Scott Feldman6c707942014-11-28 14:34:28 +01005477
5478 rocker_port->bridge_dev = bridge;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005479 switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
Scott Feldman6c707942014-11-28 14:34:28 +01005480
Jiri Pirko76c6f942015-09-24 10:02:44 +02005481 return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01005482}
5483
5484static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
5485{
Scott Feldman027e00d2015-06-01 11:39:05 -07005486 u16 untagged_vid = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01005487 int err;
5488
Scott Feldman027e00d2015-06-01 11:39:05 -07005489 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5490 if (err)
5491 return err;
5492
Simon Hormandf6a2062015-05-21 12:40:17 +09005493 rocker_port_internal_vlan_id_put(rocker_port,
Scott Feldman6c707942014-11-28 14:34:28 +01005494 rocker_port->bridge_dev->ifindex);
Scott Feldman6c707942014-11-28 14:34:28 +01005495 rocker_port->internal_vlan_id =
5496 rocker_port_internal_vlan_id_get(rocker_port,
5497 rocker_port->dev->ifindex);
Scott Feldman027e00d2015-06-01 11:39:05 -07005498
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005499 switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
5500 false);
Scott Feldman027e00d2015-06-01 11:39:05 -07005501 rocker_port->bridge_dev = NULL;
5502
Jiri Pirko76c6f942015-09-24 10:02:44 +02005503 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
Scott Feldmane47172a2015-02-25 20:15:38 -08005504 if (err)
5505 return err;
5506
5507 if (rocker_port->dev->flags & IFF_UP)
Jiri Pirko76c6f942015-09-24 10:02:44 +02005508 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01005509
5510 return err;
5511}
5512
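/* Called when the port's OVS master changes (master may be NULL on
 * unlink). Forwarding is disabled and then re-enabled after the new
 * master is recorded, so the port's forwarding state is rebuilt for
 * the new topology.
 */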
Simon Horman82549732015-07-16 10:39:14 +09005513static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5514 struct net_device *master)
5515{
5516 int err;
5517
5518 rocker_port->bridge_dev = master;
5519
Jiri Pirko76c6f942015-09-24 10:02:44 +02005520 err = rocker_port_fwd_disable(rocker_port, NULL, 0);
Simon Horman82549732015-07-16 10:39:14 +09005521 if (err)
5522 return err;
Jiri Pirko76c6f942015-09-24 10:02:44 +02005523 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
Simon Horman82549732015-07-16 10:39:14 +09005524
5525 return err;
5526}
5527
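/* Only Linux bridge and Open vSwitch masters are offloaded; other
 * upper devices are silently ignored.
 */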
Jiri Pirko686ed302015-08-27 09:31:23 +02005528static int rocker_port_master_linked(struct rocker_port *rocker_port,
5529 struct net_device *master)
Scott Feldman6c707942014-11-28 14:34:28 +01005530{
Scott Feldman6c707942014-11-28 14:34:28 +01005531 int err = 0;
5532
Jiri Pirko686ed302015-08-27 09:31:23 +02005533 if (netif_is_bridge_master(master))
5534 err = rocker_port_bridge_join(rocker_port, master);
5535 else if (netif_is_ovs_master(master))
5536 err = rocker_port_ovs_changed(rocker_port, master);
5537 return err;
5538}
Scott Feldman6c707942014-11-28 14:34:28 +01005539
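/* Undo whichever master relationship the port currently has. */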
Jiri Pirko686ed302015-08-27 09:31:23 +02005540static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
5541{
5542 int err = 0;
5543
5544 if (rocker_port_is_bridged(rocker_port))
5545 err = rocker_port_bridge_leave(rocker_port);
5546 else if (rocker_port_is_ovsed(rocker_port))
5547 err = rocker_port_ovs_changed(rocker_port, NULL);
Scott Feldman6c707942014-11-28 14:34:28 +01005548 return err;
5549}
5550
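/* NETDEV_CHANGEUPPER handler: reflect master link/unlink into both the
 * "world" ops and the legacy OF-DPA code. Failures are only logged,
 * since the upper-device change has already taken place by the time
 * this notifier runs.
 */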
5551static int rocker_netdevice_event(struct notifier_block *unused,
5552 unsigned long event, void *ptr)
5553{
Jiri Pirko686ed302015-08-27 09:31:23 +02005554 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5555 struct netdev_notifier_changeupper_info *info;
5556 struct rocker_port *rocker_port;
Scott Feldman6c707942014-11-28 14:34:28 +01005557 int err;
5558
Jiri Pirko686ed302015-08-27 09:31:23 +02005559 if (!rocker_port_dev_check(dev))
5560 return NOTIFY_DONE;
5561
Scott Feldman6c707942014-11-28 14:34:28 +01005562 switch (event) {
5563 case NETDEV_CHANGEUPPER:
Jiri Pirko686ed302015-08-27 09:31:23 +02005564 info = ptr;
5565 if (!info->master)
5566 goto out;
5567 rocker_port = netdev_priv(dev);
5568 if (info->linking) {
Jiri Pirkoe4201142016-02-16 15:14:45 +01005569 err = rocker_world_port_master_linked(rocker_port,
5570 info->upper_dev);
5571 if (err)
5572 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5573 err);
Jiri Pirko686ed302015-08-27 09:31:23 +02005574 err = rocker_port_master_linked(rocker_port,
5575 info->upper_dev);
5576 if (err)
5577 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5578 err);
5579 } else {
Jiri Pirkoe4201142016-02-16 15:14:45 +01005580 err = rocker_world_port_master_unlinked(rocker_port,
5581 info->upper_dev);
5582 if (err)
5583 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
5584 err);
Jiri Pirko686ed302015-08-27 09:31:23 +02005585 err = rocker_port_master_unlinked(rocker_port);
5586 if (err)
5587 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
5588 err);
5589 }
Scott Feldman6c707942014-11-28 14:34:28 +01005590 break;
5591 }
Jiri Pirko686ed302015-08-27 09:31:23 +02005592out:
Scott Feldman6c707942014-11-28 14:34:28 +01005593 return NOTIFY_DONE;
5594}
5595
5596static struct notifier_block rocker_netdevice_nb __read_mostly = {
5597 .notifier_call = rocker_netdevice_event,
5598};
5599
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005600/************************************
5601 * Net event notifier event handler
5602 ************************************/
5603
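/* Program, or remove if the neighbour is no longer valid, the IPv4
 * neighbour entry in the OF-DPA tables. ROCKER_OP_FLAG_NOWAIT is used
 * since neighbour updates may arrive in atomic context.
 */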
5604static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5605{
5606 struct rocker_port *rocker_port = netdev_priv(dev);
Scott Feldman02a9fbf2015-06-12 21:35:47 -07005607 int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5608 ROCKER_OP_FLAG_NOWAIT;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005609 __be32 ip_addr = *(__be32 *)n->primary_key;
5610
Jiri Pirko76c6f942015-09-24 10:02:44 +02005611 return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005612}
5613
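/* Only NETEVENT_NEIGH_UPDATE events from the IPv4 ARP table, and only
 * for rocker ports, are of interest here.
 */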
5614static int rocker_netevent_event(struct notifier_block *unused,
5615 unsigned long event, void *ptr)
5616{
Jiri Pirkoe4201142016-02-16 15:14:45 +01005617 struct rocker_port *rocker_port;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005618 struct net_device *dev;
5619 struct neighbour *n = ptr;
5620 int err;
5621
5622 switch (event) {
5623 case NETEVENT_NEIGH_UPDATE:
5624 if (n->tbl != &arp_tbl)
5625 return NOTIFY_DONE;
5626 dev = n->dev;
5627 if (!rocker_port_dev_check(dev))
5628 return NOTIFY_DONE;
Jiri Pirkoe4201142016-02-16 15:14:45 +01005629 rocker_port = netdev_priv(dev);
5630 err = rocker_world_port_neigh_update(rocker_port, n);
5631 if (err)
5632 netdev_warn(dev, "failed to handle neigh update (err %d)\n",
5633 err);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005634 err = rocker_neigh_update(dev, n);
5635 if (err)
5636 netdev_warn(dev,
5637 "failed to handle neigh update (err %d)\n",
5638 err);
5639 break;
5640 }
5641
5642 return NOTIFY_DONE;
5643}
5644
5645static struct notifier_block rocker_netevent_nb __read_mostly = {
5646 .notifier_call = rocker_netevent_event,
5647};
5648
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005649/***********************
5650 * Module init and exit
5651 ***********************/
5652
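/* The netdevice and netevent notifiers are registered before the PCI
 * driver so they are already in place when ports are created during
 * probe; on failure they are unregistered in reverse order.
 */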
5653static int __init rocker_module_init(void)
5654{
Scott Feldman6c707942014-11-28 14:34:28 +01005655 int err;
5656
5657 register_netdevice_notifier(&rocker_netdevice_nb);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005658 register_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005659 err = pci_register_driver(&rocker_pci_driver);
5660 if (err)
5661 goto err_pci_register_driver;
5662 return 0;
5663
5664err_pci_register_driver:
Gilad Ben-Yossefa076e6b2015-06-23 10:52:10 +03005665 unregister_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005666 unregister_netdevice_notifier(&rocker_netdevice_nb);
5667 return err;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005668}
5669
5670static void __exit rocker_module_exit(void)
5671{
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005672 unregister_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005673 unregister_netdevice_notifier(&rocker_netdevice_nb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005674 pci_unregister_driver(&rocker_pci_driver);
5675}
5676
5677module_init(rocker_module_init);
5678module_exit(rocker_module_exit);
5679
5680MODULE_LICENSE("GPL v2");
5681MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5682MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5683MODULE_DESCRIPTION("Rocker switch device driver");
5684MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);