/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};

struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};

struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};

struct rocker_desc_info {
	char *data; /* mapped */
	size_t data_size;
	size_t tlv_size;
	struct rocker_desc *desc;
	dma_addr_t mapaddr;
};

struct rocker_dma_ring_info {
	size_t size;
	u32 head;
	u32 tail;
	struct rocker_desc *desc; /* mapped */
	dma_addr_t mapaddr;
	struct rocker_desc_info *desc_info;
	unsigned int type;
};

struct rocker;

enum {
	ROCKER_CTRL_LINK_LOCAL_MCAST,
	ROCKER_CTRL_LOCAL_ARP,
	ROCKER_CTRL_IPV4_MCAST,
	ROCKER_CTRL_IPV6_MCAST,
	ROCKER_CTRL_DFLT_BRIDGING,
	ROCKER_CTRL_DFLT_OVS,
	ROCKER_CTRL_MAX,
};

#define ROCKER_INTERNAL_VLAN_ID_BASE	0x0f00
#define ROCKER_N_INTERNAL_VLANS		255
#define ROCKER_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)

struct rocker_port {
	struct net_device *dev;
	struct net_device *bridge_dev;
	struct rocker *rocker;
	unsigned int port_number;
	u32 pport;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[ROCKER_CTRL_MAX];
	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
	struct napi_struct napi_tx;
	struct napi_struct napi_rx;
	struct rocker_dma_ring_info tx_ring;
	struct rocker_dma_ring_info rx_ring;
	struct list_head trans_mem;
};

struct rocker {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct msix_entry *msix_entries;
	unsigned int port_count;
	struct rocker_port **ports;
	struct {
		u64 id;
	} hw;
	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
	struct rocker_dma_ring_info cmd_ring;
	struct rocker_dma_ring_info event_ring;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}
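
/* Editor's note (illustrative arithmetic, not driver code): with
 * ROCKER_INTERNAL_VLAN_ID_BASE = 0x0f00 and ROCKER_N_INTERNAL_VLANS = 255,
 * the range accepted above is 0x0f00 through 0x0ffe inclusive, i.e. exactly
 * 255 VLAN IDs, one per bit of internal_vlan_bitmap in struct rocker.
 */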

static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_ovs_master(rocker_port->bridge_dev);
}

#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)

static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
				     enum switchdev_trans trans, int flags,
				     size_t size)
{
	struct list_head *elem = NULL;
	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a per-port list.  If in transaction
	 * commit phase, dequeue the memory from the per-port list
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	switch (trans) {
	case SWITCHDEV_TRANS_PREPARE:
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		list_add_tail(elem, &rocker_port->trans_mem);
		break;
	case SWITCHDEV_TRANS_COMMIT:
		BUG_ON(list_empty(&rocker_port->trans_mem));
		elem = rocker_port->trans_mem.next;
		list_del_init(elem);
		break;
	case SWITCHDEV_TRANS_NONE:
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (elem)
			INIT_LIST_HEAD(elem);
		break;
	default:
		break;
	}

	return elem ? elem + 1 : NULL;
}

static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
}

static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 size_t n, size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
}

static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
{
	struct list_head *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (trans == SWITCHDEV_TRANS_PREPARE)
		return;

	elem = (struct list_head *)mem - 1;
	BUG_ON(!list_empty(elem));
	kfree(elem);
}
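
/* Illustrative usage sketch (editor's addition, not part of the driver):
 * a switchdev object add runs the same code path twice, once with
 * trans == SWITCHDEV_TRANS_PREPARE and once with SWITCHDEV_TRANS_COMMIT.
 * A hypothetical helper allocating an entry would therefore do:
 *
 *	entry = rocker_port_kzalloc(rocker_port, trans, flags,
 *				    sizeof(*entry));
 *	if (!entry)
 *		return -ENOMEM;			// prepare phase reports the failure
 *	...
 *	rocker_port_kfree(trans, entry);	// no-op during prepare
 *
 * so the commit phase reuses the prepare-phase allocation and cannot fail
 * on memory.
 */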

struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
					      enum switchdev_trans trans,
					      int flags)
{
	struct rocker_wait *wait;

	wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
	if (!wait)
		return NULL;
	rocker_wait_init(wait);
	return wait;
}

static void rocker_wait_destroy(enum switchdev_trans trans,
				struct rocker_wait *wait)
{
	rocker_port_kfree(trans, wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	if (!wait->done)
		return false;
	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
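
/* For example, rocker_read32(rocker, TEST_REG) token-pastes the register
 * name and reads from rocker->hw_addr + ROCKER_TEST_REG; the ROCKER_*
 * register offsets come from the rocker.h hardware interface header
 * included above.
 */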

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}

static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
{
	const struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, expected %02x\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}

/******
 * TLV
 ******/

#define ROCKER_TLV_ALIGNTO	8U
#define ROCKER_TLV_ALIGN(len) \
	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN	ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))

/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 * |            Header           | Pad |           Payload           | Pad |
 * |      (struct rocker_tlv)    | ing |                             | ing |
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 *  <--------------------------- tlv->len -------------------------->
 */
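
/* Worked example (editor's illustration): ROCKER_TLV_ALIGN() rounds up to a
 * multiple of ROCKER_TLV_ALIGNTO (8), so ROCKER_TLV_ALIGN(5) == 8 and
 * ROCKER_TLV_ALIGN(8) == 8.  A TLV carrying a 5-byte payload thus reports
 * tlv->len == ROCKER_TLV_HDRLEN + 5, but the next TLV starts
 * ROCKER_TLV_HDRLEN + 8 bytes later, with the 3 trailing pad bytes zeroed
 * by rocker_tlv_put().
 */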

static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
					  int *remaining)
{
	int totlen = ROCKER_TLV_ALIGN(tlv->len);

	*remaining -= totlen;
	return (struct rocker_tlv *) ((char *) tlv + totlen);
}

static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
{
	return remaining >= (int) ROCKER_TLV_HDRLEN &&
	       tlv->len >= ROCKER_TLV_HDRLEN &&
	       tlv->len <= remaining;
}

#define rocker_tlv_for_each(pos, head, len, rem)	\
	for (pos = head, rem = len;			\
	     rocker_tlv_ok(pos, rem);			\
	     pos = rocker_tlv_next(pos, &(rem)))

#define rocker_tlv_for_each_nested(pos, tlv, rem)	\
	rocker_tlv_for_each(pos, rocker_tlv_data(tlv),	\
			    rocker_tlv_len(tlv), rem)

static int rocker_tlv_attr_size(int payload)
{
	return ROCKER_TLV_HDRLEN + payload;
}

static int rocker_tlv_total_size(int payload)
{
	return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}

static int rocker_tlv_padlen(int payload)
{
	return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}

static int rocker_tlv_type(const struct rocker_tlv *tlv)
{
	return tlv->type;
}

static void *rocker_tlv_data(const struct rocker_tlv *tlv)
{
	return (char *) tlv + ROCKER_TLV_HDRLEN;
}

static int rocker_tlv_len(const struct rocker_tlv *tlv)
{
	return tlv->len - ROCKER_TLV_HDRLEN;
}

static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
{
	return *(u8 *) rocker_tlv_data(tlv);
}

static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
{
	return *(u16 *) rocker_tlv_data(tlv);
}

static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
{
	return *(__be16 *) rocker_tlv_data(tlv);
}

static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
{
	return *(u32 *) rocker_tlv_data(tlv);
}

static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
{
	return *(u64 *) rocker_tlv_data(tlv);
}

static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
			     const char *buf, int buf_len)
{
	const struct rocker_tlv *tlv;
	const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
	int rem;

	memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));

	rocker_tlv_for_each(tlv, head, buf_len, rem) {
		u32 type = rocker_tlv_type(tlv);

		if (type > 0 && type <= maxtype)
			tb[type] = tlv;
	}
}

static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
				    const struct rocker_tlv *tlv)
{
	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
			 rocker_tlv_len(tlv));
}

static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
				  const struct rocker_desc_info *desc_info)
{
	rocker_tlv_parse(tb, maxtype, desc_info->data,
			 desc_info->desc->tlv_size);
}

static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
{
	return (struct rocker_tlv *) ((char *) desc_info->data +
				      desc_info->tlv_size);
}

static int rocker_tlv_put(struct rocker_desc_info *desc_info,
			  int attrtype, int attrlen, const void *data)
{
	int tail_room = desc_info->data_size - desc_info->tlv_size;
	int total_size = rocker_tlv_total_size(attrlen);
	struct rocker_tlv *tlv;

	if (unlikely(tail_room < total_size))
		return -EMSGSIZE;

	tlv = rocker_tlv_start(desc_info);
	desc_info->tlv_size += total_size;
	tlv->type = attrtype;
	tlv->len = rocker_tlv_attr_size(attrlen);
	memcpy(rocker_tlv_data(tlv), data, attrlen);
	memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
	return 0;
}

static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
			     int attrtype, u8 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
}

static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
			      int attrtype, u16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
}

static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
			       int attrtype, __be16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
}

static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
			      int attrtype, u32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
}

static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
			       int attrtype, __be32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
}

static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
			      int attrtype, u64 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
}

static struct rocker_tlv *
rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
{
	struct rocker_tlv *start = rocker_tlv_start(desc_info);

	if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
		return NULL;

	return start;
}

static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
				struct rocker_tlv *start)
{
	start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
}

static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
				   const struct rocker_tlv *start)
{
	desc_info->tlv_size = (const char *) start - desc_info->data;
}
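
/* Illustrative sketch (editor's addition, not driver code): a command
 * message is built as a flat run of TLVs with optional nesting, e.g.
 *
 *	struct rocker_tlv *frag;
 *
 *	rocker_tlv_put_u16(desc_info, SOME_TYPE_TLV, cmd_type);	// hypothetical attr
 *	frag = rocker_tlv_nest_start(desc_info, SOME_NEST_TLV);	// hypothetical attr
 *	rocker_tlv_put_u64(desc_info, SOME_ADDR_TLV, dma_handle);
 *	rocker_tlv_nest_end(desc_info, frag);
 *
 * Each put may fail with -EMSGSIZE when the descriptor buffer is full, in
 * which case the caller cancels the nest and unwinds.
 */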

/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}

static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
	static struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	static struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
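
/* Editor's illustration: rocker_dma_ring_size_fix(100) would round 100 up
 * to the next power of two (128) and clamp the result into
 * [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX]; rocker_dma_ring_create()
 * below BUG()s if it is handed a size that does not already satisfy this.
 */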
1009
Simon Hormane5054642015-05-25 14:28:36 +09001010static int rocker_dma_ring_create(const struct rocker *rocker,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001011 unsigned int type,
1012 size_t size,
1013 struct rocker_dma_ring_info *info)
1014{
1015 int i;
1016
1017 BUG_ON(size != rocker_dma_ring_size_fix(size));
1018 info->size = size;
1019 info->type = type;
1020 info->head = 0;
1021 info->tail = 0;
1022 info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
1023 GFP_KERNEL);
1024 if (!info->desc_info)
1025 return -ENOMEM;
1026
1027 info->desc = pci_alloc_consistent(rocker->pdev,
1028 info->size * sizeof(*info->desc),
1029 &info->mapaddr);
1030 if (!info->desc) {
1031 kfree(info->desc_info);
1032 return -ENOMEM;
1033 }
1034
1035 for (i = 0; i < info->size; i++)
1036 info->desc_info[i].desc = &info->desc[i];
1037
1038 rocker_write32(rocker, DMA_DESC_CTRL(info->type),
1039 ROCKER_DMA_DESC_CTRL_RESET);
1040 rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
1041 rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
1042
1043 return 0;
1044}
1045
Simon Hormane5054642015-05-25 14:28:36 +09001046static void rocker_dma_ring_destroy(const struct rocker *rocker,
1047 const struct rocker_dma_ring_info *info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001048{
1049 rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
1050
1051 pci_free_consistent(rocker->pdev,
1052 info->size * sizeof(struct rocker_desc),
1053 info->desc, info->mapaddr);
1054 kfree(info->desc_info);
1055}
1056
Simon Hormane5054642015-05-25 14:28:36 +09001057static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001058 struct rocker_dma_ring_info *info)
1059{
1060 int i;
1061
1062 BUG_ON(info->head || info->tail);
1063
1064 /* When ring is consumer, we need to advance head for each desc.
1065 * That tells hw that the desc is ready to be used by it.
1066 */
1067 for (i = 0; i < info->size - 1; i++)
1068 rocker_desc_head_set(rocker, info, &info->desc_info[i]);
1069 rocker_desc_commit(&info->desc_info[i]);
1070}
1071
Simon Hormane5054642015-05-25 14:28:36 +09001072static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
1073 const struct rocker_dma_ring_info *info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001074 int direction, size_t buf_size)
1075{
1076 struct pci_dev *pdev = rocker->pdev;
1077 int i;
1078 int err;
1079
1080 for (i = 0; i < info->size; i++) {
1081 struct rocker_desc_info *desc_info = &info->desc_info[i];
1082 struct rocker_desc *desc = &info->desc[i];
1083 dma_addr_t dma_handle;
1084 char *buf;
1085
1086 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
1087 if (!buf) {
1088 err = -ENOMEM;
1089 goto rollback;
1090 }
1091
1092 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
1093 if (pci_dma_mapping_error(pdev, dma_handle)) {
1094 kfree(buf);
1095 err = -EIO;
1096 goto rollback;
1097 }
1098
1099 desc_info->data = buf;
1100 desc_info->data_size = buf_size;
1101 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
1102
1103 desc->buf_addr = dma_handle;
1104 desc->buf_size = buf_size;
1105 }
1106 return 0;
1107
1108rollback:
1109 for (i--; i >= 0; i--) {
Simon Hormane5054642015-05-25 14:28:36 +09001110 const struct rocker_desc_info *desc_info = &info->desc_info[i];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001111
1112 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1113 desc_info->data_size, direction);
1114 kfree(desc_info->data);
1115 }
1116 return err;
1117}
1118
Simon Hormane5054642015-05-25 14:28:36 +09001119static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
1120 const struct rocker_dma_ring_info *info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001121 int direction)
1122{
1123 struct pci_dev *pdev = rocker->pdev;
1124 int i;
1125
1126 for (i = 0; i < info->size; i++) {
Simon Hormane5054642015-05-25 14:28:36 +09001127 const struct rocker_desc_info *desc_info = &info->desc_info[i];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001128 struct rocker_desc *desc = &info->desc[i];
1129
1130 desc->buf_addr = 0;
1131 desc->buf_size = 0;
1132 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1133 desc_info->data_size, direction);
1134 kfree(desc_info->data);
1135 }
1136}
1137
1138static int rocker_dma_rings_init(struct rocker *rocker)
1139{
Simon Hormane5054642015-05-25 14:28:36 +09001140 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001141 int err;
1142
1143 err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
1144 ROCKER_DMA_CMD_DEFAULT_SIZE,
1145 &rocker->cmd_ring);
1146 if (err) {
1147 dev_err(&pdev->dev, "failed to create command dma ring\n");
1148 return err;
1149 }
1150
1151 spin_lock_init(&rocker->cmd_ring_lock);
1152
1153 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
1154 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
1155 if (err) {
1156 dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
1157 goto err_dma_cmd_ring_bufs_alloc;
1158 }
1159
1160 err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
1161 ROCKER_DMA_EVENT_DEFAULT_SIZE,
1162 &rocker->event_ring);
1163 if (err) {
1164 dev_err(&pdev->dev, "failed to create event dma ring\n");
1165 goto err_dma_event_ring_create;
1166 }
1167
1168 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
1169 PCI_DMA_FROMDEVICE, PAGE_SIZE);
1170 if (err) {
1171 dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
1172 goto err_dma_event_ring_bufs_alloc;
1173 }
1174 rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
1175 return 0;
1176
1177err_dma_event_ring_bufs_alloc:
1178 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1179err_dma_event_ring_create:
1180 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1181 PCI_DMA_BIDIRECTIONAL);
1182err_dma_cmd_ring_bufs_alloc:
1183 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1184 return err;
1185}
1186
1187static void rocker_dma_rings_fini(struct rocker *rocker)
1188{
1189 rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
1190 PCI_DMA_BIDIRECTIONAL);
1191 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1192 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1193 PCI_DMA_BIDIRECTIONAL);
1194 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1195}
1196
Simon Horman534ba6a2015-06-01 13:25:04 +09001197static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001198 struct rocker_desc_info *desc_info,
1199 struct sk_buff *skb, size_t buf_len)
1200{
Simon Horman534ba6a2015-06-01 13:25:04 +09001201 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001202 struct pci_dev *pdev = rocker->pdev;
1203 dma_addr_t dma_handle;
1204
1205 dma_handle = pci_map_single(pdev, skb->data, buf_len,
1206 PCI_DMA_FROMDEVICE);
1207 if (pci_dma_mapping_error(pdev, dma_handle))
1208 return -EIO;
1209 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
1210 goto tlv_put_failure;
1211 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
1212 goto tlv_put_failure;
1213 return 0;
1214
1215tlv_put_failure:
1216 pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
1217 desc_info->tlv_size = 0;
1218 return -EMSGSIZE;
1219}
1220
Simon Hormane5054642015-05-25 14:28:36 +09001221static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001222{
1223 return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1224}
1225
Simon Horman534ba6a2015-06-01 13:25:04 +09001226static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001227 struct rocker_desc_info *desc_info)
1228{
1229 struct net_device *dev = rocker_port->dev;
1230 struct sk_buff *skb;
1231 size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1232 int err;
1233
1234 /* Ensure that hw will see tlv_size zero in case of an error.
1235 * That tells hw to use another descriptor.
1236 */
1237 rocker_desc_cookie_ptr_set(desc_info, NULL);
1238 desc_info->tlv_size = 0;
1239
1240 skb = netdev_alloc_skb_ip_align(dev, buf_len);
1241 if (!skb)
1242 return -ENOMEM;
Simon Horman534ba6a2015-06-01 13:25:04 +09001243 err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001244 if (err) {
1245 dev_kfree_skb_any(skb);
1246 return err;
1247 }
1248 rocker_desc_cookie_ptr_set(desc_info, skb);
1249 return 0;
1250}
1251
Simon Hormane5054642015-05-25 14:28:36 +09001252static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
1253 const struct rocker_tlv **attrs)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001254{
1255 struct pci_dev *pdev = rocker->pdev;
1256 dma_addr_t dma_handle;
1257 size_t len;
1258
1259 if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1260 !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1261 return;
1262 dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1263 len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1264 pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1265}
1266
Simon Hormane5054642015-05-25 14:28:36 +09001267static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
1268 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001269{
Simon Hormane5054642015-05-25 14:28:36 +09001270 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001271 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1272
1273 if (!skb)
1274 return;
1275 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1276 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1277 dev_kfree_skb_any(skb);
1278}
1279
Simon Horman534ba6a2015-06-01 13:25:04 +09001280static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001281{
Simon Hormane5054642015-05-25 14:28:36 +09001282 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
Simon Horman534ba6a2015-06-01 13:25:04 +09001283 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001284 int i;
1285 int err;
1286
1287 for (i = 0; i < rx_ring->size; i++) {
Simon Horman534ba6a2015-06-01 13:25:04 +09001288 err = rocker_dma_rx_ring_skb_alloc(rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001289 &rx_ring->desc_info[i]);
1290 if (err)
1291 goto rollback;
1292 }
1293 return 0;
1294
1295rollback:
1296 for (i--; i >= 0; i--)
1297 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1298 return err;
1299}
1300
Simon Horman534ba6a2015-06-01 13:25:04 +09001301static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001302{
Simon Hormane5054642015-05-25 14:28:36 +09001303 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
Simon Horman534ba6a2015-06-01 13:25:04 +09001304 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001305 int i;
1306
1307 for (i = 0; i < rx_ring->size; i++)
1308 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1309}
1310
1311static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1312{
1313 struct rocker *rocker = rocker_port->rocker;
1314 int err;
1315
1316 err = rocker_dma_ring_create(rocker,
1317 ROCKER_DMA_TX(rocker_port->port_number),
1318 ROCKER_DMA_TX_DEFAULT_SIZE,
1319 &rocker_port->tx_ring);
1320 if (err) {
1321 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1322 return err;
1323 }
1324
1325 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1326 PCI_DMA_TODEVICE,
1327 ROCKER_DMA_TX_DESC_SIZE);
1328 if (err) {
1329 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1330 goto err_dma_tx_ring_bufs_alloc;
1331 }
1332
1333 err = rocker_dma_ring_create(rocker,
1334 ROCKER_DMA_RX(rocker_port->port_number),
1335 ROCKER_DMA_RX_DEFAULT_SIZE,
1336 &rocker_port->rx_ring);
1337 if (err) {
1338 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1339 goto err_dma_rx_ring_create;
1340 }
1341
1342 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1343 PCI_DMA_BIDIRECTIONAL,
1344 ROCKER_DMA_RX_DESC_SIZE);
1345 if (err) {
1346 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1347 goto err_dma_rx_ring_bufs_alloc;
1348 }
1349
Simon Horman534ba6a2015-06-01 13:25:04 +09001350 err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001351 if (err) {
1352 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1353 goto err_dma_rx_ring_skbs_alloc;
1354 }
1355 rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1356
1357 return 0;
1358
1359err_dma_rx_ring_skbs_alloc:
1360 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1361 PCI_DMA_BIDIRECTIONAL);
1362err_dma_rx_ring_bufs_alloc:
1363 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1364err_dma_rx_ring_create:
1365 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1366 PCI_DMA_TODEVICE);
1367err_dma_tx_ring_bufs_alloc:
1368 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1369 return err;
1370}
1371
1372static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1373{
1374 struct rocker *rocker = rocker_port->rocker;
1375
Simon Horman534ba6a2015-06-01 13:25:04 +09001376 rocker_dma_rx_ring_skbs_free(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001377 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1378 PCI_DMA_BIDIRECTIONAL);
1379 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1380 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1381 PCI_DMA_TODEVICE);
1382 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1383}
1384
Simon Hormane5054642015-05-25 14:28:36 +09001385static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1386 bool enable)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001387{
1388 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1389
1390 if (enable)
David S. Miller71a83a62015-03-03 21:16:48 -05001391 val |= 1ULL << rocker_port->pport;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001392 else
David S. Miller71a83a62015-03-03 21:16:48 -05001393 val &= ~(1ULL << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001394 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1395}
1396
1397/********************************
1398 * Interrupt handler and helpers
1399 ********************************/
1400
1401static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1402{
1403 struct rocker *rocker = dev_id;
Simon Hormane5054642015-05-25 14:28:36 +09001404 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001405 struct rocker_wait *wait;
1406 u32 credits = 0;
1407
1408 spin_lock(&rocker->cmd_ring_lock);
1409 while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1410 wait = rocker_desc_cookie_ptr_get(desc_info);
Scott Feldman179f9a22015-06-12 21:35:46 -07001411 if (wait->nowait) {
1412 rocker_desc_gen_clear(desc_info);
1413 rocker_wait_destroy(SWITCHDEV_TRANS_NONE, wait);
1414 } else {
1415 rocker_wait_wake_up(wait);
1416 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001417 credits++;
1418 }
1419 spin_unlock(&rocker->cmd_ring_lock);
1420 rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1421
1422 return IRQ_HANDLED;
1423}
1424
Simon Hormane5054642015-05-25 14:28:36 +09001425static void rocker_port_link_up(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001426{
1427 netif_carrier_on(rocker_port->dev);
1428 netdev_info(rocker_port->dev, "Link is up\n");
1429}
1430
Simon Hormane5054642015-05-25 14:28:36 +09001431static void rocker_port_link_down(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001432{
1433 netif_carrier_off(rocker_port->dev);
1434 netdev_info(rocker_port->dev, "Link is down\n");
1435}
1436
Simon Hormane5054642015-05-25 14:28:36 +09001437static int rocker_event_link_change(const struct rocker *rocker,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001438 const struct rocker_tlv *info)
1439{
Simon Hormane5054642015-05-25 14:28:36 +09001440 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001441 unsigned int port_number;
1442 bool link_up;
1443 struct rocker_port *rocker_port;
1444
1445 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001446 if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001447 !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1448 return -EIO;
1449 port_number =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001450 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001451 link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1452
1453 if (port_number >= rocker->port_count)
1454 return -EINVAL;
1455
1456 rocker_port = rocker->ports[port_number];
1457 if (netif_carrier_ok(rocker_port->dev) != link_up) {
1458 if (link_up)
1459 rocker_port_link_up(rocker_port);
1460 else
1461 rocker_port_link_down(rocker_port);
1462 }
1463
1464 return 0;
1465}
1466
Scott Feldman6c707942014-11-28 14:34:28 +01001467static int rocker_port_fdb(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001468 enum switchdev_trans trans,
Scott Feldman6c707942014-11-28 14:34:28 +01001469 const unsigned char *addr,
1470 __be16 vlan_id, int flags);
1471
Simon Hormane5054642015-05-25 14:28:36 +09001472static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
Scott Feldman6c707942014-11-28 14:34:28 +01001473 const struct rocker_tlv *info)
1474{
Simon Hormane5054642015-05-25 14:28:36 +09001475 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
Scott Feldman6c707942014-11-28 14:34:28 +01001476 unsigned int port_number;
1477 struct rocker_port *rocker_port;
Simon Hormane5054642015-05-25 14:28:36 +09001478 const unsigned char *addr;
Scott Feldman92014b92015-06-12 21:35:49 -07001479 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
Scott Feldman6c707942014-11-28 14:34:28 +01001480 __be16 vlan_id;
1481
1482 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001483 if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
Scott Feldman6c707942014-11-28 14:34:28 +01001484 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1485 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1486 return -EIO;
1487 port_number =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001488 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
Scott Feldman6c707942014-11-28 14:34:28 +01001489 addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
Jiri Pirko9b03c712014-12-03 14:14:53 +01001490 vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
Scott Feldman6c707942014-11-28 14:34:28 +01001491
1492 if (port_number >= rocker->port_count)
1493 return -EINVAL;
1494
1495 rocker_port = rocker->ports[port_number];
1496
1497 if (rocker_port->stp_state != BR_STATE_LEARNING &&
1498 rocker_port->stp_state != BR_STATE_FORWARDING)
1499 return 0;
1500
Scott Feldman92014b92015-06-12 21:35:49 -07001501 return rocker_port_fdb(rocker_port, SWITCHDEV_TRANS_NONE,
1502 addr, vlan_id, flags);
Scott Feldman6c707942014-11-28 14:34:28 +01001503}
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001504
Simon Hormane5054642015-05-25 14:28:36 +09001505static int rocker_event_process(const struct rocker *rocker,
1506 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001507{
Simon Hormane5054642015-05-25 14:28:36 +09001508 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1509 const struct rocker_tlv *info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001510 u16 type;
1511
1512 rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1513 if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1514 !attrs[ROCKER_TLV_EVENT_INFO])
1515 return -EIO;
1516
1517 type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1518 info = attrs[ROCKER_TLV_EVENT_INFO];
1519
1520 switch (type) {
1521 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1522 return rocker_event_link_change(rocker, info);
Scott Feldman6c707942014-11-28 14:34:28 +01001523 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1524 return rocker_event_mac_vlan_seen(rocker, info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001525 }
1526
1527 return -EOPNOTSUPP;
1528}
1529
1530static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1531{
1532 struct rocker *rocker = dev_id;
Simon Hormane5054642015-05-25 14:28:36 +09001533 const struct pci_dev *pdev = rocker->pdev;
1534 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001535 u32 credits = 0;
1536 int err;
1537
1538 while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1539 err = rocker_desc_err(desc_info);
1540 if (err) {
1541 dev_err(&pdev->dev, "event desc received with err %d\n",
1542 err);
1543 } else {
1544 err = rocker_event_process(rocker, desc_info);
1545 if (err)
1546 dev_err(&pdev->dev, "event processing failed with err %d\n",
1547 err);
1548 }
1549 rocker_desc_gen_clear(desc_info);
1550 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1551 credits++;
1552 }
1553 rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1554
1555 return IRQ_HANDLED;
1556}
1557
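/* Note: rocker_event_irq_handler() above returns one credit per processed
 * descriptor so the device can post further events; descriptors that
 * arrive with errors are still recycled to keep the ring flowing.
 */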
1558static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1559{
1560 struct rocker_port *rocker_port = dev_id;
1561
1562 napi_schedule(&rocker_port->napi_tx);
1563 return IRQ_HANDLED;
1564}
1565
1566static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1567{
1568 struct rocker_port *rocker_port = dev_id;
1569
1570 napi_schedule(&rocker_port->napi_rx);
1571 return IRQ_HANDLED;
1572}
1573
1574/********************
1575 * Command interface
1576 ********************/
1577
Simon Horman534ba6a2015-06-01 13:25:04 +09001578typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001579 struct rocker_desc_info *desc_info,
1580 void *priv);
1581
Simon Horman534ba6a2015-06-01 13:25:04 +09001582typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001583 const struct rocker_desc_info *desc_info,
1584 void *priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001585
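/* Sketch of the command pattern implemented by rocker_cmd_exec() below
 * (callback names here are hypothetical, for illustration only): a prep
 * callback fills the request TLVs into a command descriptor, and an
 * optional proc callback parses the reply TLVs once the device completes
 * it:
 *
 *	err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
 *			      my_prep_cb, prep_priv,
 *			      my_proc_cb, proc_priv);
 */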
Simon Horman534ba6a2015-06-01 13:25:04 +09001586static int rocker_cmd_exec(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07001587 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09001588 rocker_cmd_prep_cb_t prepare, void *prepare_priv,
1589 rocker_cmd_proc_cb_t process, void *process_priv)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001590{
Simon Horman534ba6a2015-06-01 13:25:04 +09001591 struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001592 struct rocker_desc_info *desc_info;
1593 struct rocker_wait *wait;
Scott Feldman179f9a22015-06-12 21:35:46 -07001594 bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
1595 unsigned long lock_flags;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001596 int err;
1597
Scott Feldman179f9a22015-06-12 21:35:46 -07001598 wait = rocker_wait_create(rocker_port, trans, flags);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001599 if (!wait)
1600 return -ENOMEM;
Scott Feldman179f9a22015-06-12 21:35:46 -07001601 wait->nowait = nowait;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001602
Scott Feldman179f9a22015-06-12 21:35:46 -07001603 spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
Scott Feldmanc4f20322015-05-10 09:47:50 -07001604
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001605 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1606 if (!desc_info) {
Scott Feldman179f9a22015-06-12 21:35:46 -07001607 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001608 err = -EAGAIN;
1609 goto out;
1610 }
Scott Feldmanc4f20322015-05-10 09:47:50 -07001611
Simon Horman534ba6a2015-06-01 13:25:04 +09001612 err = prepare(rocker_port, desc_info, prepare_priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001613 if (err) {
Scott Feldman179f9a22015-06-12 21:35:46 -07001614 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001615 goto out;
1616 }
Scott Feldmanc4f20322015-05-10 09:47:50 -07001617
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001618 rocker_desc_cookie_ptr_set(desc_info, wait);
Scott Feldmanc4f20322015-05-10 09:47:50 -07001619
1620 if (trans != SWITCHDEV_TRANS_PREPARE)
1621 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1622
Scott Feldman179f9a22015-06-12 21:35:46 -07001623 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1624
1625 if (nowait)
1626 return 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001627
Scott Feldmanc4f20322015-05-10 09:47:50 -07001628 if (trans != SWITCHDEV_TRANS_PREPARE)
1629 if (!rocker_wait_event_timeout(wait, HZ / 10))
1630 return -EIO;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001631
1632 err = rocker_desc_err(desc_info);
1633 if (err)
1634 return err;
1635
1636 if (process)
Simon Horman534ba6a2015-06-01 13:25:04 +09001637 err = process(rocker_port, desc_info, process_priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001638
1639 rocker_desc_gen_clear(desc_info);
1640out:
Simon Horman0985df72015-05-25 14:28:35 +09001641 rocker_wait_destroy(trans, wait);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001642 return err;
1643}
1644
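/* Behavior summary for rocker_cmd_exec() above: under
 * SWITCHDEV_TRANS_PREPARE the descriptor is prepared but never posted,
 * so a transaction can still be aborted without side effects; with
 * ROCKER_OP_FLAG_NOWAIT the call returns as soon as the descriptor is
 * posted, and the completion path is expected to clean up the wait
 * object instead of the caller.
 */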
1645static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001646rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001647 struct rocker_desc_info *desc_info,
1648 void *priv)
1649{
1650 struct rocker_tlv *cmd_info;
1651
1652 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1653 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1654 return -EMSGSIZE;
1655 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1656 if (!cmd_info)
1657 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001658 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1659 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001660 return -EMSGSIZE;
1661 rocker_tlv_nest_end(desc_info, cmd_info);
1662 return 0;
1663}
1664
1665static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001666rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001667 const struct rocker_desc_info *desc_info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001668 void *priv)
1669{
1670 struct ethtool_cmd *ecmd = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001671 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1672 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001673 u32 speed;
1674 u8 duplex;
1675 u8 autoneg;
1676
1677 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1678 if (!attrs[ROCKER_TLV_CMD_INFO])
1679 return -EIO;
1680
1681 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1682 attrs[ROCKER_TLV_CMD_INFO]);
1683 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1684 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1685 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1686 return -EIO;
1687
1688 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1689 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1690 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1691
1692 ecmd->transceiver = XCVR_INTERNAL;
1693 ecmd->supported = SUPPORTED_TP;
1694 ecmd->phy_address = 0xff;
1695 ecmd->port = PORT_TP;
1696 ethtool_cmd_speed_set(ecmd, speed);
1697 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1698 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1699
1700 return 0;
1701}
1702
1703static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001704rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001705 const struct rocker_desc_info *desc_info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001706 void *priv)
1707{
1708 unsigned char *macaddr = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001709 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1710 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1711 const struct rocker_tlv *attr;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001712
1713 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1714 if (!attrs[ROCKER_TLV_CMD_INFO])
1715 return -EIO;
1716
1717 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1718 attrs[ROCKER_TLV_CMD_INFO]);
1719 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1720 if (!attr)
1721 return -EIO;
1722
1723 if (rocker_tlv_len(attr) != ETH_ALEN)
1724 return -EINVAL;
1725
1726 ether_addr_copy(macaddr, rocker_tlv_data(attr));
1727 return 0;
1728}
1729
David Aherndb191702015-03-17 20:23:16 -06001730struct port_name {
1731 char *buf;
1732 size_t len;
1733};
1734
1735static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001736rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001737 const struct rocker_desc_info *desc_info,
David Aherndb191702015-03-17 20:23:16 -06001738 void *priv)
1739{
Simon Hormane5054642015-05-25 14:28:36 +09001740 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1741 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
David Aherndb191702015-03-17 20:23:16 -06001742 struct port_name *name = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001743 const struct rocker_tlv *attr;
David Aherndb191702015-03-17 20:23:16 -06001744 size_t i, j, len;
Simon Hormane5054642015-05-25 14:28:36 +09001745 const char *str;
David Aherndb191702015-03-17 20:23:16 -06001746
1747 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1748 if (!attrs[ROCKER_TLV_CMD_INFO])
1749 return -EIO;
1750
1751 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1752 attrs[ROCKER_TLV_CMD_INFO]);
1753 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1754 if (!attr)
1755 return -EIO;
1756
1757	len = min_t(size_t, rocker_tlv_len(attr), name->len - 1); /* leave room for '\0' */
1758 str = rocker_tlv_data(attr);
1759
1760 /* make sure name only contains alphanumeric characters */
1761 for (i = j = 0; i < len; ++i) {
1762 if (isalnum(str[i])) {
1763 name->buf[j] = str[i];
1764 j++;
1765 }
1766 }
1767
1768 if (j == 0)
1769 return -EIO;
1770
1771 name->buf[j] = '\0';
1772
1773 return 0;
1774}
1775
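/* Example (illustrative): a device-reported name such as "sw1/p7" is
 * sanitized above to "sw1p7", since only alphanumeric characters are
 * copied; an all-punctuation name leaves j == 0 and is rejected with
 * -EIO.
 */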
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001776static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001777rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001778 struct rocker_desc_info *desc_info,
1779 void *priv)
1780{
1781 struct ethtool_cmd *ecmd = priv;
1782 struct rocker_tlv *cmd_info;
1783
1784 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1785 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1786 return -EMSGSIZE;
1787 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1788 if (!cmd_info)
1789 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001790 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1791 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001792 return -EMSGSIZE;
1793 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1794 ethtool_cmd_speed(ecmd)))
1795 return -EMSGSIZE;
1796 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1797 ecmd->duplex))
1798 return -EMSGSIZE;
1799 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1800 ecmd->autoneg))
1801 return -EMSGSIZE;
1802 rocker_tlv_nest_end(desc_info, cmd_info);
1803 return 0;
1804}
1805
1806static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001807rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001808 struct rocker_desc_info *desc_info,
1809 void *priv)
1810{
Simon Hormane5054642015-05-25 14:28:36 +09001811 const unsigned char *macaddr = priv;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001812 struct rocker_tlv *cmd_info;
1813
1814 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1815 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1816 return -EMSGSIZE;
1817 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1818 if (!cmd_info)
1819 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001820 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1821 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001822 return -EMSGSIZE;
1823 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1824 ETH_ALEN, macaddr))
1825 return -EMSGSIZE;
1826 rocker_tlv_nest_end(desc_info, cmd_info);
1827 return 0;
1828}
1829
Scott Feldman5111f802014-11-28 14:34:30 +01001830static int
Scott Feldman77a58c72015-07-08 16:06:47 -07001831rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1832 struct rocker_desc_info *desc_info,
1833 void *priv)
1834{
1835 int mtu = *(int *)priv;
1836 struct rocker_tlv *cmd_info;
1837
1838 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1839 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1840 return -EMSGSIZE;
1841 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1842 if (!cmd_info)
1843 return -EMSGSIZE;
1844 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1845 rocker_port->pport))
1846 return -EMSGSIZE;
1847 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1848 mtu))
1849 return -EMSGSIZE;
1850 rocker_tlv_nest_end(desc_info, cmd_info);
1851 return 0;
1852}
1853
1854static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001855rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
Scott Feldman5111f802014-11-28 14:34:30 +01001856 struct rocker_desc_info *desc_info,
1857 void *priv)
1858{
1859 struct rocker_tlv *cmd_info;
1860
1861 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1862 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1863 return -EMSGSIZE;
1864 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1865 if (!cmd_info)
1866 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001867 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1868 rocker_port->pport))
Scott Feldman5111f802014-11-28 14:34:30 +01001869 return -EMSGSIZE;
1870 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1871 !!(rocker_port->brport_flags & BR_LEARNING)))
1872 return -EMSGSIZE;
1873 rocker_tlv_nest_end(desc_info, cmd_info);
1874 return 0;
1875}
1876
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001877static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1878 struct ethtool_cmd *ecmd)
1879{
Scott Feldman179f9a22015-06-12 21:35:46 -07001880 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001881 rocker_cmd_get_port_settings_prep, NULL,
1882 rocker_cmd_get_port_settings_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001883 ecmd);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001884}
1885
1886static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1887 unsigned char *macaddr)
1888{
Scott Feldman179f9a22015-06-12 21:35:46 -07001889 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001890 rocker_cmd_get_port_settings_prep, NULL,
1891 rocker_cmd_get_port_settings_macaddr_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001892 macaddr);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001893}
1894
1895static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1896 struct ethtool_cmd *ecmd)
1897{
Scott Feldman179f9a22015-06-12 21:35:46 -07001898 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001899 rocker_cmd_set_port_settings_ethtool_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001900 ecmd, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001901}
1902
1903static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1904 unsigned char *macaddr)
1905{
Scott Feldman179f9a22015-06-12 21:35:46 -07001906 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001907 rocker_cmd_set_port_settings_macaddr_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001908 macaddr, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001909}
1910
Scott Feldman77a58c72015-07-08 16:06:47 -07001911static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1912 int mtu)
1913{
1914 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
1915 rocker_cmd_set_port_settings_mtu_prep,
1916 &mtu, NULL, NULL);
1917}
1918
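/* Usage sketch (hypothetical caller, e.g. an ndo_change_mtu handler):
 *
 *	err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
 *
 * The prep callback consumes &mtu before rocker_cmd_exec() returns, so
 * passing the address of a stack variable is safe.
 */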
Scott Feldmanc4f20322015-05-10 09:47:50 -07001919static int rocker_port_set_learning(struct rocker_port *rocker_port,
1920 enum switchdev_trans trans)
Scott Feldman5111f802014-11-28 14:34:30 +01001921{
Scott Feldman179f9a22015-06-12 21:35:46 -07001922 return rocker_cmd_exec(rocker_port, trans, 0,
Scott Feldman5111f802014-11-28 14:34:30 +01001923 rocker_cmd_set_port_learning_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001924 NULL, NULL, NULL);
Scott Feldman5111f802014-11-28 14:34:30 +01001925}
1926
Simon Hormane5054642015-05-25 14:28:36 +09001927static int
1928rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1929 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001930{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001931 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1932 entry->key.ig_port.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001933 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001934 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1935 entry->key.ig_port.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001936 return -EMSGSIZE;
1937 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1938 entry->key.ig_port.goto_tbl))
1939 return -EMSGSIZE;
1940
1941 return 0;
1942}
1943
Simon Hormane5054642015-05-25 14:28:36 +09001944static int
1945rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1946 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001947{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001948 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1949 entry->key.vlan.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001950 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001951 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1952 entry->key.vlan.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001953 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001954 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1955 entry->key.vlan.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001956 return -EMSGSIZE;
1957 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1958 entry->key.vlan.goto_tbl))
1959 return -EMSGSIZE;
1960 if (entry->key.vlan.untagged &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01001961 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1962 entry->key.vlan.new_vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001963 return -EMSGSIZE;
1964
1965 return 0;
1966}
1967
Simon Hormane5054642015-05-25 14:28:36 +09001968static int
1969rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1970 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001971{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001972 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1973 entry->key.term_mac.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001974 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001975 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1976 entry->key.term_mac.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001977 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001978 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1979 entry->key.term_mac.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001980 return -EMSGSIZE;
1981 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1982 ETH_ALEN, entry->key.term_mac.eth_dst))
1983 return -EMSGSIZE;
1984 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1985 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1986 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001987 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1988 entry->key.term_mac.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001989 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001990 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1991 entry->key.term_mac.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001992 return -EMSGSIZE;
1993 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1994 entry->key.term_mac.goto_tbl))
1995 return -EMSGSIZE;
1996 if (entry->key.term_mac.copy_to_cpu &&
1997 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1998 entry->key.term_mac.copy_to_cpu))
1999 return -EMSGSIZE;
2000
2001 return 0;
2002}
2003
2004static int
2005rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002006 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002007{
Jiri Pirko9b03c712014-12-03 14:14:53 +01002008 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2009 entry->key.ucast_routing.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002010 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002011 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2012 entry->key.ucast_routing.dst4))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002013 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002014 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2015 entry->key.ucast_routing.dst4_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002016 return -EMSGSIZE;
2017 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2018 entry->key.ucast_routing.goto_tbl))
2019 return -EMSGSIZE;
2020 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2021 entry->key.ucast_routing.group_id))
2022 return -EMSGSIZE;
2023
2024 return 0;
2025}
2026
Simon Hormane5054642015-05-25 14:28:36 +09002027static int
2028rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2029 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002030{
2031 if (entry->key.bridge.has_eth_dst &&
2032 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2033 ETH_ALEN, entry->key.bridge.eth_dst))
2034 return -EMSGSIZE;
2035 if (entry->key.bridge.has_eth_dst_mask &&
2036 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2037 ETH_ALEN, entry->key.bridge.eth_dst_mask))
2038 return -EMSGSIZE;
2039 if (entry->key.bridge.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002040 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2041 entry->key.bridge.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002042 return -EMSGSIZE;
2043 if (entry->key.bridge.tunnel_id &&
2044 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2045 entry->key.bridge.tunnel_id))
2046 return -EMSGSIZE;
2047 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2048 entry->key.bridge.goto_tbl))
2049 return -EMSGSIZE;
2050 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2051 entry->key.bridge.group_id))
2052 return -EMSGSIZE;
2053 if (entry->key.bridge.copy_to_cpu &&
2054 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2055 entry->key.bridge.copy_to_cpu))
2056 return -EMSGSIZE;
2057
2058 return 0;
2059}
2060
Simon Hormane5054642015-05-25 14:28:36 +09002061static int
2062rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2063 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002064{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002065 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2066 entry->key.acl.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002067 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002068 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2069 entry->key.acl.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002070 return -EMSGSIZE;
2071 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2072 ETH_ALEN, entry->key.acl.eth_src))
2073 return -EMSGSIZE;
2074 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2075 ETH_ALEN, entry->key.acl.eth_src_mask))
2076 return -EMSGSIZE;
2077 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2078 ETH_ALEN, entry->key.acl.eth_dst))
2079 return -EMSGSIZE;
2080 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2081 ETH_ALEN, entry->key.acl.eth_dst_mask))
2082 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002083 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2084 entry->key.acl.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002085 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002086 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2087 entry->key.acl.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002088 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002089 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2090 entry->key.acl.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002091 return -EMSGSIZE;
2092
2093 switch (ntohs(entry->key.acl.eth_type)) {
2094 case ETH_P_IP:
2095 case ETH_P_IPV6:
2096 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2097 entry->key.acl.ip_proto))
2098 return -EMSGSIZE;
2099 if (rocker_tlv_put_u8(desc_info,
2100 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2101 entry->key.acl.ip_proto_mask))
2102 return -EMSGSIZE;
2103 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2104 entry->key.acl.ip_tos & 0x3f))
2105 return -EMSGSIZE;
2106 if (rocker_tlv_put_u8(desc_info,
2107 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2108 entry->key.acl.ip_tos_mask & 0x3f))
2109 return -EMSGSIZE;
2110 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2111 (entry->key.acl.ip_tos & 0xc0) >> 6))
2112 return -EMSGSIZE;
2113 if (rocker_tlv_put_u8(desc_info,
2114 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2115 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
2116 return -EMSGSIZE;
2117 break;
2118 }
2119
2120 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2121 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2122 entry->key.acl.group_id))
2123 return -EMSGSIZE;
2124
2125 return 0;
2126}
2127
Simon Horman534ba6a2015-06-01 13:25:04 +09002128static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002129 struct rocker_desc_info *desc_info,
2130 void *priv)
2131{
Simon Hormane5054642015-05-25 14:28:36 +09002132 const struct rocker_flow_tbl_entry *entry = priv;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002133 struct rocker_tlv *cmd_info;
2134 int err = 0;
2135
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002136 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002137 return -EMSGSIZE;
2138 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2139 if (!cmd_info)
2140 return -EMSGSIZE;
2141 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2142 entry->key.tbl_id))
2143 return -EMSGSIZE;
2144 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2145 entry->key.priority))
2146 return -EMSGSIZE;
2147 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2148 return -EMSGSIZE;
2149 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2150 entry->cookie))
2151 return -EMSGSIZE;
2152
2153 switch (entry->key.tbl_id) {
2154 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2155 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2156 break;
2157 case ROCKER_OF_DPA_TABLE_ID_VLAN:
2158 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2159 break;
2160 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2161 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2162 break;
2163 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2164 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2165 break;
2166 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2167 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2168 break;
2169 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2170 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2171 break;
2172 default:
2173 err = -ENOTSUPP;
2174 break;
2175 }
2176
2177 if (err)
2178 return err;
2179
2180 rocker_tlv_nest_end(desc_info, cmd_info);
2181
2182 return 0;
2183}
2184
Simon Horman534ba6a2015-06-01 13:25:04 +09002185static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002186 struct rocker_desc_info *desc_info,
2187 void *priv)
2188{
2189 const struct rocker_flow_tbl_entry *entry = priv;
2190 struct rocker_tlv *cmd_info;
2191
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002192 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002193 return -EMSGSIZE;
2194 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2195 if (!cmd_info)
2196 return -EMSGSIZE;
2197 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2198 entry->cookie))
2199 return -EMSGSIZE;
2200 rocker_tlv_nest_end(desc_info, cmd_info);
2201
2202 return 0;
2203}
2204
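/* Note: rocker_cmd_flow_tbl_del() above identifies the flow purely by
 * the 64-bit cookie assigned at add time; none of the original match
 * fields are re-sent to the device.
 */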
2205static int
2206rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2207 struct rocker_group_tbl_entry *entry)
2208{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002209 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002210 ROCKER_GROUP_PORT_GET(entry->group_id)))
2211 return -EMSGSIZE;
2212 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2213 entry->l2_interface.pop_vlan))
2214 return -EMSGSIZE;
2215
2216 return 0;
2217}
2218
2219static int
2220rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002221 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002222{
2223 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2224 entry->l2_rewrite.group_id))
2225 return -EMSGSIZE;
2226 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2227 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2228 ETH_ALEN, entry->l2_rewrite.eth_src))
2229 return -EMSGSIZE;
2230 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2231 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2232 ETH_ALEN, entry->l2_rewrite.eth_dst))
2233 return -EMSGSIZE;
2234 if (entry->l2_rewrite.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002235 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2236 entry->l2_rewrite.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002237 return -EMSGSIZE;
2238
2239 return 0;
2240}
2241
2242static int
2243rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002244 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002245{
2246 int i;
2247 struct rocker_tlv *group_ids;
2248
2249 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2250 entry->group_count))
2251 return -EMSGSIZE;
2252
2253 group_ids = rocker_tlv_nest_start(desc_info,
2254 ROCKER_TLV_OF_DPA_GROUP_IDS);
2255 if (!group_ids)
2256 return -EMSGSIZE;
2257
2258 for (i = 0; i < entry->group_count; i++)
2259 /* Note TLV array is 1-based */
2260 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2261 return -EMSGSIZE;
2262
2263 rocker_tlv_nest_end(desc_info, group_ids);
2264
2265 return 0;
2266}
2267
2268static int
2269rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002270 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002271{
2272 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2273 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2274 ETH_ALEN, entry->l3_unicast.eth_src))
2275 return -EMSGSIZE;
2276 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2277 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2278 ETH_ALEN, entry->l3_unicast.eth_dst))
2279 return -EMSGSIZE;
2280 if (entry->l3_unicast.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002281 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2282 entry->l3_unicast.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002283 return -EMSGSIZE;
2284 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2285 entry->l3_unicast.ttl_check))
2286 return -EMSGSIZE;
2287 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2288 entry->l3_unicast.group_id))
2289 return -EMSGSIZE;
2290
2291 return 0;
2292}
2293
Simon Horman534ba6a2015-06-01 13:25:04 +09002294static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002295 struct rocker_desc_info *desc_info,
2296 void *priv)
2297{
2298 struct rocker_group_tbl_entry *entry = priv;
2299 struct rocker_tlv *cmd_info;
2300 int err = 0;
2301
2302 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2303 return -EMSGSIZE;
2304 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2305 if (!cmd_info)
2306 return -EMSGSIZE;
2307
2308 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2309 entry->group_id))
2310 return -EMSGSIZE;
2311
2312 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2313 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2314 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2315 break;
2316 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2317 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2318 break;
2319 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2320 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2321 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2322 break;
2323 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2324 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2325 break;
2326 default:
2327 err = -ENOTSUPP;
2328 break;
2329 }
2330
2331 if (err)
2332 return err;
2333
2334 rocker_tlv_nest_end(desc_info, cmd_info);
2335
2336 return 0;
2337}
2338
Simon Horman534ba6a2015-06-01 13:25:04 +09002339static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002340 struct rocker_desc_info *desc_info,
2341 void *priv)
2342{
2343 const struct rocker_group_tbl_entry *entry = priv;
2344 struct rocker_tlv *cmd_info;
2345
2346 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2347 return -EMSGSIZE;
2348 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2349 if (!cmd_info)
2350 return -EMSGSIZE;
2351 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2352 entry->group_id))
2353 return -EMSGSIZE;
2354 rocker_tlv_nest_end(desc_info, cmd_info);
2355
2356 return 0;
2357}
2358
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002359/***************************************************
2360 * Flow, group, FDB, internal VLAN and neigh tables
2361 ***************************************************/
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002362
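/* Overview (added summary): the driver shadows device state in five
 * hashtables -- flow, group, FDB, internal VLAN and neigh -- each guarded
 * by its own spinlock and keyed by a cheap hash (crc32 of the flow key,
 * the group_id, and so on). The helpers below all follow the same guarded
 * lookup pattern:
 *
 *	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
 *	found = rocker_flow_tbl_find(rocker, match);
 *	...
 *	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
 */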
2363static int rocker_init_tbls(struct rocker *rocker)
2364{
2365 hash_init(rocker->flow_tbl);
2366 spin_lock_init(&rocker->flow_tbl_lock);
2367
2368 hash_init(rocker->group_tbl);
2369 spin_lock_init(&rocker->group_tbl_lock);
2370
2371 hash_init(rocker->fdb_tbl);
2372 spin_lock_init(&rocker->fdb_tbl_lock);
2373
2374 hash_init(rocker->internal_vlan_tbl);
2375 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2376
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002377 hash_init(rocker->neigh_tbl);
2378 spin_lock_init(&rocker->neigh_tbl_lock);
2379
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002380 return 0;
2381}
2382
2383static void rocker_free_tbls(struct rocker *rocker)
2384{
2385 unsigned long flags;
2386 struct rocker_flow_tbl_entry *flow_entry;
2387 struct rocker_group_tbl_entry *group_entry;
2388 struct rocker_fdb_tbl_entry *fdb_entry;
2389 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002390 struct rocker_neigh_tbl_entry *neigh_entry;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002391 struct hlist_node *tmp;
2392 int bkt;
2393
2394 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2395 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2396 hash_del(&flow_entry->entry);
2397 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2398
2399 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2400 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2401 hash_del(&group_entry->entry);
2402 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2403
2404 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2405 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2406 hash_del(&fdb_entry->entry);
2407 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2408
2409 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2410 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2411 tmp, internal_vlan_entry, entry)
2412 hash_del(&internal_vlan_entry->entry);
2413 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002414
2415 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2416 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2417 hash_del(&neigh_entry->entry);
2418 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002419}
2420
2421static struct rocker_flow_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002422rocker_flow_tbl_find(const struct rocker *rocker,
2423 const struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002424{
2425 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002426 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002427
2428 hash_for_each_possible(rocker->flow_tbl, found,
2429 entry, match->key_crc32) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002430 if (memcmp(&found->key, &match->key, key_len) == 0)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002431 return found;
2432 }
2433
2434 return NULL;
2435}
2436
2437static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07002438 enum switchdev_trans trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002439 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002440{
2441 struct rocker *rocker = rocker_port->rocker;
2442 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002443 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002444 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002445
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002446 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002447
Scott Feldman179f9a22015-06-12 21:35:46 -07002448 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002449
2450 found = rocker_flow_tbl_find(rocker, match);
2451
2452 if (found) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002453 match->cookie = found->cookie;
Scott Feldmanc4f20322015-05-10 09:47:50 -07002454 if (trans != SWITCHDEV_TRANS_PREPARE)
2455 hash_del(&found->entry);
Simon Horman0985df72015-05-25 14:28:35 +09002456 rocker_port_kfree(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002457 found = match;
2458 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002459 } else {
2460 found = match;
2461 found->cookie = rocker->flow_tbl_next_cookie++;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002462 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002463 }
2464
Scott Feldmanc4f20322015-05-10 09:47:50 -07002465 if (trans != SWITCHDEV_TRANS_PREPARE)
2466 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002467
Scott Feldman179f9a22015-06-12 21:35:46 -07002468 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002469
Scott Feldman179f9a22015-06-12 21:35:46 -07002470 return rocker_cmd_exec(rocker_port, trans, flags,
2471 rocker_cmd_flow_tbl_add, found, NULL, NULL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002472}
2473
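/* Note: in rocker_flow_tbl_add() above, an add that matches an existing
 * key becomes a FLOW_MOD reusing the old cookie, so the device keeps one
 * stable flow identity across rewrites; only a genuinely new key consumes
 * a fresh cookie from flow_tbl_next_cookie. Under SWITCHDEV_TRANS_PREPARE
 * the hashtable is left untouched so the transaction can be aborted.
 */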
2474static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07002475 enum switchdev_trans trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002476 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002477{
2478 struct rocker *rocker = rocker_port->rocker;
2479 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002480 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002481 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002482 int err = 0;
2483
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002484 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002485
Scott Feldman179f9a22015-06-12 21:35:46 -07002486 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002487
2488 found = rocker_flow_tbl_find(rocker, match);
2489
2490 if (found) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07002491 if (trans != SWITCHDEV_TRANS_PREPARE)
2492 hash_del(&found->entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002493 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002494 }
2495
Scott Feldman179f9a22015-06-12 21:35:46 -07002496 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002497
Simon Horman0985df72015-05-25 14:28:35 +09002498 rocker_port_kfree(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002499
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002500 if (found) {
Scott Feldman179f9a22015-06-12 21:35:46 -07002501 err = rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002502 rocker_cmd_flow_tbl_del,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002503 found, NULL, NULL);
Simon Horman0985df72015-05-25 14:28:35 +09002504 rocker_port_kfree(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002505 }
2506
2507 return err;
2508}
2509
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002510static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002511 enum switchdev_trans trans, int flags,
2512 struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002513{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002514 if (flags & ROCKER_OP_FLAG_REMOVE)
Scott Feldman179f9a22015-06-12 21:35:46 -07002515 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002516 else
Scott Feldman179f9a22015-06-12 21:35:46 -07002517 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002518}
2519
2520static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002521 enum switchdev_trans trans, int flags,
2522 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002523 enum rocker_of_dpa_table_id goto_tbl)
2524{
2525 struct rocker_flow_tbl_entry *entry;
2526
Scott Feldman179f9a22015-06-12 21:35:46 -07002527 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002528 if (!entry)
2529 return -ENOMEM;
2530
2531 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2532 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002533 entry->key.ig_port.in_pport = in_pport;
2534 entry->key.ig_port.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002535 entry->key.ig_port.goto_tbl = goto_tbl;
2536
Scott Feldmanc4f20322015-05-10 09:47:50 -07002537 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002538}
2539
2540static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002541 enum switchdev_trans trans, int flags,
2542 u32 in_pport, __be16 vlan_id,
2543 __be16 vlan_id_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002544 enum rocker_of_dpa_table_id goto_tbl,
2545 bool untagged, __be16 new_vlan_id)
2546{
2547 struct rocker_flow_tbl_entry *entry;
2548
Scott Feldman179f9a22015-06-12 21:35:46 -07002549 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002550 if (!entry)
2551 return -ENOMEM;
2552
2553 entry->key.priority = ROCKER_PRIORITY_VLAN;
2554 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002555 entry->key.vlan.in_pport = in_pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002556 entry->key.vlan.vlan_id = vlan_id;
2557 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2558 entry->key.vlan.goto_tbl = goto_tbl;
2559
2560 entry->key.vlan.untagged = untagged;
2561 entry->key.vlan.new_vlan_id = new_vlan_id;
2562
Scott Feldmanc4f20322015-05-10 09:47:50 -07002563 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002564}
2565
2566static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002567 enum switchdev_trans trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002568 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002569 __be16 eth_type, const u8 *eth_dst,
2570 const u8 *eth_dst_mask, __be16 vlan_id,
2571 __be16 vlan_id_mask, bool copy_to_cpu,
2572 int flags)
2573{
2574 struct rocker_flow_tbl_entry *entry;
2575
Scott Feldman179f9a22015-06-12 21:35:46 -07002576 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002577 if (!entry)
2578 return -ENOMEM;
2579
2580 if (is_multicast_ether_addr(eth_dst)) {
2581 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2582 entry->key.term_mac.goto_tbl =
2583 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2584 } else {
2585 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2586 entry->key.term_mac.goto_tbl =
2587 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2588 }
2589
2590 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002591 entry->key.term_mac.in_pport = in_pport;
2592 entry->key.term_mac.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002593 entry->key.term_mac.eth_type = eth_type;
2594 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2595 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2596 entry->key.term_mac.vlan_id = vlan_id;
2597 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2598 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2599
Scott Feldmanc4f20322015-05-10 09:47:50 -07002600 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002601}
2602
2603static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002604 enum switchdev_trans trans, int flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002605 const u8 *eth_dst, const u8 *eth_dst_mask,
2606 __be16 vlan_id, u32 tunnel_id,
2607 enum rocker_of_dpa_table_id goto_tbl,
2608 u32 group_id, bool copy_to_cpu)
2609{
2610 struct rocker_flow_tbl_entry *entry;
2611 u32 priority;
2612 bool vlan_bridging = !!vlan_id;
2613 bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2614 bool wild = false;
2615
Scott Feldman179f9a22015-06-12 21:35:46 -07002616 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002617 if (!entry)
2618 return -ENOMEM;
2619
2620 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2621
2622 if (eth_dst) {
2623 entry->key.bridge.has_eth_dst = 1;
2624 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2625 }
2626 if (eth_dst_mask) {
2627 entry->key.bridge.has_eth_dst_mask = 1;
2628 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
Simon Horman629161f2015-04-30 15:21:29 +09002629 if (!ether_addr_equal(eth_dst_mask, ff_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002630 wild = true;
2631 }
2632
2633 priority = ROCKER_PRIORITY_UNKNOWN;
Thomas Graf51ace882014-11-28 14:34:32 +01002634 if (vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002635 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002636 else if (vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002637 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002638 else if (vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002639 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
Thomas Graf51ace882014-11-28 14:34:32 +01002640 else if (!vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002641 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002642 else if (!vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002643 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002644 else if (!vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002645 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2646
2647 entry->key.priority = priority;
2648 entry->key.bridge.vlan_id = vlan_id;
2649 entry->key.bridge.tunnel_id = tunnel_id;
2650 entry->key.bridge.goto_tbl = goto_tbl;
2651 entry->key.bridge.group_id = group_id;
2652 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2653
Scott Feldmanc4f20322015-05-10 09:47:50 -07002654 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002655}
2656
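/* Summary of the priority selection in rocker_flow_tbl_bridge() above:
 *
 *	vlan_bridging	dflt	wild	priority
 *	yes		yes	yes	BRIDGING_VLAN_DFLT_WILD
 *	yes		yes	no	BRIDGING_VLAN_DFLT_EXACT
 *	yes		no	-	BRIDGING_VLAN
 *	no		yes	yes	BRIDGING_TENANT_DFLT_WILD
 *	no		yes	no	BRIDGING_TENANT_DFLT_EXACT
 *	no		no	-	BRIDGING_TENANT
 *
 * "dflt" means no exact eth_dst (or a masked one); "wild" means the
 * eth_dst mask is not all-ones.
 */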
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002657static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002658 enum switchdev_trans trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002659 __be16 eth_type, __be32 dst,
2660 __be32 dst_mask, u32 priority,
2661 enum rocker_of_dpa_table_id goto_tbl,
2662 u32 group_id, int flags)
2663{
2664 struct rocker_flow_tbl_entry *entry;
2665
Scott Feldman179f9a22015-06-12 21:35:46 -07002666 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002667 if (!entry)
2668 return -ENOMEM;
2669
2670 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2671 entry->key.priority = priority;
2672 entry->key.ucast_routing.eth_type = eth_type;
2673 entry->key.ucast_routing.dst4 = dst;
2674 entry->key.ucast_routing.dst4_mask = dst_mask;
2675 entry->key.ucast_routing.goto_tbl = goto_tbl;
2676 entry->key.ucast_routing.group_id = group_id;
2677 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2678 ucast_routing.group_id);
2679
Scott Feldmanc4f20322015-05-10 09:47:50 -07002680 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002681}
2682
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002683static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002684 enum switchdev_trans trans, int flags,
2685 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002686 const u8 *eth_src, const u8 *eth_src_mask,
2687 const u8 *eth_dst, const u8 *eth_dst_mask,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002688 __be16 eth_type, __be16 vlan_id,
2689 __be16 vlan_id_mask, u8 ip_proto,
2690 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002691 u32 group_id)
2692{
2693 u32 priority;
2694 struct rocker_flow_tbl_entry *entry;
2695
Scott Feldman179f9a22015-06-12 21:35:46 -07002696 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002697 if (!entry)
2698 return -ENOMEM;
2699
2700 priority = ROCKER_PRIORITY_ACL_NORMAL;
2701 if (eth_dst && eth_dst_mask) {
Simon Horman629161f2015-04-30 15:21:29 +09002702 if (ether_addr_equal(eth_dst_mask, mcast_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002703 priority = ROCKER_PRIORITY_ACL_DFLT;
2704 else if (is_link_local_ether_addr(eth_dst))
2705 priority = ROCKER_PRIORITY_ACL_CTRL;
2706 }
2707
2708 entry->key.priority = priority;
2709 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002710 entry->key.acl.in_pport = in_pport;
2711 entry->key.acl.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002712
2713 if (eth_src)
2714 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2715 if (eth_src_mask)
2716 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2717 if (eth_dst)
2718 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2719 if (eth_dst_mask)
2720 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2721
2722 entry->key.acl.eth_type = eth_type;
2723 entry->key.acl.vlan_id = vlan_id;
2724 entry->key.acl.vlan_id_mask = vlan_id_mask;
2725 entry->key.acl.ip_proto = ip_proto;
2726 entry->key.acl.ip_proto_mask = ip_proto_mask;
2727 entry->key.acl.ip_tos = ip_tos;
2728 entry->key.acl.ip_tos_mask = ip_tos_mask;
2729 entry->key.acl.group_id = group_id;
2730
Scott Feldmanc4f20322015-05-10 09:47:50 -07002731 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002732}
2733
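/* Note: rocker_flow_tbl_acl() above refines priority by destination MAC:
 * a multicast-wide eth_dst mask drops to ROCKER_PRIORITY_ACL_DFLT, a
 * link-local eth_dst is promoted to ROCKER_PRIORITY_ACL_CTRL, and
 * everything else stays at ROCKER_PRIORITY_ACL_NORMAL.
 */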
2734static struct rocker_group_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002735rocker_group_tbl_find(const struct rocker *rocker,
2736 const struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002737{
2738 struct rocker_group_tbl_entry *found;
2739
2740 hash_for_each_possible(rocker->group_tbl, found,
2741 entry, match->group_id) {
2742 if (found->group_id == match->group_id)
2743 return found;
2744 }
2745
2746 return NULL;
2747}
2748
Simon Horman0985df72015-05-25 14:28:35 +09002749static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002750 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002751{
2752 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2753 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2754 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
Simon Horman0985df72015-05-25 14:28:35 +09002755 rocker_port_kfree(trans, entry->group_ids);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002756 break;
2757 default:
2758 break;
2759 }
Simon Horman0985df72015-05-25 14:28:35 +09002760 rocker_port_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002761}
2762
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
				enum switchdev_trans trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		if (trans != SWITCHDEV_TRANS_PREPARE)
			hash_del(&found->entry);
		rocker_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (trans != SWITCHDEV_TRANS_PREPARE)
		hash_add(rocker->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(rocker_port, trans, flags,
			       rocker_cmd_group_tbl_add, found, NULL, NULL);
}

static int rocker_group_tbl_del(struct rocker_port *rocker_port,
				enum switchdev_trans trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		if (trans != SWITCHDEV_TRANS_PREPARE)
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	rocker_group_tbl_entry_free(trans, match);

	if (found) {
		err = rocker_cmd_exec(rocker_port, trans, flags,
				      rocker_cmd_group_tbl_del,
				      found, NULL, NULL);
		rocker_group_tbl_entry_free(trans, found);
	}

	return err;
}

static int rocker_group_tbl_do(struct rocker_port *rocker_port,
			       enum switchdev_trans trans, int flags,
			       struct rocker_group_tbl_entry *entry)
{
	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_group_tbl_del(rocker_port, trans, flags, entry);
	else
		return rocker_group_tbl_add(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_interface(struct rocker_port *rocker_port,
				     enum switchdev_trans trans, int flags,
				     __be16 vlan_id, u32 out_pport,
				     int pop_vlan)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
				   enum switchdev_trans trans,
				   int flags, u8 group_count,
				   const u32 *group_ids, u32 group_id)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = group_id;
	entry->group_count = group_count;

	entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
					       group_count, sizeof(u32));
	if (!entry->group_ids) {
		rocker_port_kfree(trans, entry);
		return -ENOMEM;
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_flood(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 __be16 vlan_id, u8 group_count,
				 const u32 *group_ids, u32 group_id)
{
	return rocker_group_l2_fan_out(rocker_port, trans, flags,
				       group_count, group_ids,
				       group_id);
}

static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
				   enum switchdev_trans trans, int flags,
				   u32 index, const u8 *src_mac, const u8 *dst_mac,
				   __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static struct rocker_neigh_tbl_entry *
rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
{
	struct rocker_neigh_tbl_entry *found;

	hash_for_each_possible(rocker->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}

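/* Neighbor table helpers.  Entries are reference counted: the /32
 * host route for the neighbor itself holds one reference, and each
 * nexthop that resolves to the neighbor holds another.  In the
 * switchdev PREPARE phase only the index is reserved; the hash
 * table and ref counts are left untouched.
 */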
static void _rocker_neigh_add(struct rocker *rocker,
			      enum switchdev_trans trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (trans != SWITCHDEV_TRANS_COMMIT)
		entry->index = rocker->neigh_tbl_next_index++;
	if (trans == SWITCHDEV_TRANS_PREPARE)
		return;
	entry->ref_count++;
	hash_add(rocker->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}

static void _rocker_neigh_del(enum switchdev_trans trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (trans == SWITCHDEV_TRANS_PREPARE)
		return;
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		rocker_port_kfree(trans, entry);
	}
}

static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
				 enum switchdev_trans trans,
				 const u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else if (trans != SWITCHDEV_TRANS_PREPARE) {
		entry->ref_count++;
	}
}

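/* Add, update, or remove the table entry for a resolved neighbor:
 * "adding" inserts a new entry, "updating" refreshes the MAC of an
 * existing one, and "removing" drops the reference taken by the
 * /32 host route.
 */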
static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
				  enum switchdev_trans trans,
				  int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		_rocker_neigh_add(rocker, trans, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = rocker_group_l3_unicast(rocker_port, trans, flags,
				      entry->index,
				      rocker_port->dev->dev_addr,
				      entry->eth_dst,
				      rocker_port->internal_vlan_id,
				      entry->ttl_check,
				      rocker_port->pport);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
						     eth_type, ip_addr,
						     inet_make_mask(32),
						     priority, goto_tbl,
						     group_id, flags);

		if (err)
			netdev_err(rocker_port->dev,
				   "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	if (!adding)
		rocker_port_kfree(trans, entry);

	return err;
}

static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
				    enum switchdev_trans trans, __be32 ip_addr)
{
	struct net_device *dev = rocker_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
					     ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}

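/* Resolve a nexthop IP to a neighbor table index, creating a
 * placeholder entry if the neighbor is not yet known.  The index is
 * returned through *index; if the neighbor's MAC is still
 * unresolved, ARP resolution is kicked off via
 * rocker_port_ipv4_resolve().
 */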
static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
			       enum switchdev_trans trans, int flags,
			       __be32 ip_addr, u32 *index)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);
	if (found)
		*index = found->index;

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		_rocker_neigh_add(rocker, trans, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (!adding)
		rocker_port_kfree(trans, entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);

	return err;
}

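/* Rebuild the L2 flood group for a VLAN so it references an L2
 * interface group for every bridged port that is a member of the
 * VLAN.  The resulting group chain is:
 *
 *   L2 flood group (vlan) -> L2 interface group (vlan, pport) -> pport
 */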
static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
					enum switchdev_trans trans,
					int flags, __be16 vlan_id)
{
	struct rocker_port *p;
	const struct rocker *rocker = rocker_port->rocker;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
					rocker->port_count, sizeof(u32));
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (!p)
			continue;
		if (!rocker_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
				    group_count, group_ids, group_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	rocker_port_kfree(trans, group_ids);
	return err;
}

static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
				      enum switchdev_trans trans, int flags,
				      __be16 vlan_id, bool pop_vlan)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_port *p;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = rocker_port->pport;
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = rocker_group_l2_interface(rocker_port, trans, flags,
					vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}

static struct rocker_ctrl {
	const u8 *eth_dst;
	const u8 *eth_dst_mask;
	__be16 eth_type;
	bool acl;
	bool bridge;
	bool term;
	bool copy_to_cpu;
} rocker_ctrls[] = {
	[ROCKER_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[ROCKER_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[ROCKER_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};

static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
				     enum switchdev_trans trans, int flags,
				     const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport = rocker_port->pport;
	u32 in_pport_mask = 0xffffffff;
	u32 out_pport = 0;
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = rocker_flow_tbl_acl(rocker_port, trans, flags,
				  in_pport, in_pport_mask,
				  eth_src, eth_src_mask,
				  ctrl->eth_dst, ctrl->eth_dst_mask,
				  ctrl->eth_type,
				  vlan_id, vlan_id_mask,
				  ip_proto, ip_proto_mask,
				  ip_tos, ip_tos_mask,
				  group_id);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
					enum switchdev_trans trans, int flags,
					const struct rocker_ctrl *ctrl,
					__be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
				     ctrl->eth_dst, ctrl->eth_dst_mask,
				     vlan_id, tunnel_id,
				     goto_tbl, group_id, ctrl->copy_to_cpu);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
				      enum switchdev_trans trans, int flags,
				      const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       ctrl->eth_type, ctrl->eth_dst,
				       ctrl->eth_dst_mask, vlan_id,
				       vlan_id_mask, ctrl->copy_to_cpu,
				       flags);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}

static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	if (ctrl->acl)
		return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
						 ctrl, vlan_id);

	if (ctrl->bridge)
		return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
						    ctrl, vlan_id);

	if (ctrl->term)
		return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
						  ctrl, vlan_id);

	return -EOPNOTSUPP;
}

static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
				     enum switchdev_trans trans, int flags,
				     __be16 vlan_id)
{
	int err = 0;
	int i;

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (rocker_port->ctrls[i]) {
			err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
						    &rocker_ctrls[i], vlan_id);
			if (err)
				return err;
		}
	}

	return err;
}

static int rocker_port_ctrl(struct rocker_port *rocker_port,
			    enum switchdev_trans trans, int flags,
			    const struct rocker_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
					    ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}

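/* Add or remove a VLAN on a port: install the ctrl entries, the
 * per-port and CPU L2 interface groups, the VLAN flood group, and
 * finally the VLAN table entry mapping the wire VLAN to the
 * internal VLAN ID.  The vlan_bitmap change is always undone at
 * err_out in the PREPARE phase, since the COMMIT phase will redo
 * it for real.
 */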
static int rocker_port_vlan(struct rocker_port *rocker_port,
			    enum switchdev_trans trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = rocker_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);

	if (adding && test_bit(ntohs(internal_vlan_id),
			       rocker_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding && !test_bit(ntohs(internal_vlan_id),
				      rocker_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	if (adding) {
		err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
						internal_vlan_id);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
					 internal_vlan_id, untagged);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
					   internal_vlan_id);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
				   in_pport, vlan_id, vlan_id_mask,
				   goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN table\n", err);

err_out:
	if (trans == SWITCHDEV_TRANS_PREPARE)
		change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	return err;
}

static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
			      enum switchdev_trans trans, int flags)
{
	enum rocker_of_dpa_table_id goto_tbl;
	u32 in_pport;
	u32 in_pport_mask;
	int err;

	/* Normal Ethernet Frames.  Matches pkts from any local physical
	 * ports.  Goto VLAN tbl.
	 */

	in_pport = 0;
	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
				      in_pport, in_pport_mask,
				      goto_tbl);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) ingress port table entry\n", err);

	return err;
}

struct rocker_fdb_learn_work {
	struct work_struct work;
	struct rocker_port *rocker_port;
	enum switchdev_trans trans;
	int flags;
	u8 addr[ETH_ALEN];
	u16 vid;
};

static void rocker_port_fdb_learn_work(struct work_struct *work)
{
	const struct rocker_fdb_learn_work *lw =
		container_of(work, struct rocker_fdb_learn_work, work);
	bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
	bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
					 lw->rocker_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
					 lw->rocker_port->dev, &info.info);

	rocker_port_kfree(lw->trans, work);
}

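/* Program the bridge flow table entry for a learned MAC and, when
 * BR_LEARNING_SYNC is set on a bridged port, schedule deferred work
 * to notify the bridge driver of the learned/forgotten address via
 * the switchdev FDB notifiers.
 */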
static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 const u8 *addr, __be16 vlan_id)
{
	struct rocker_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = rocker_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	if (rocker_port_is_bridged(rocker_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
		err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
					     NULL, vlan_id, tunnel_id, goto_tbl,
					     group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, rocker_port_fdb_learn_work);

	lw->rocker_port = rocker_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);

	if (trans == SWITCHDEV_TRANS_PREPARE)
		rocker_port_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}

static struct rocker_fdb_tbl_entry *
rocker_fdb_tbl_find(const struct rocker *rocker,
		    const struct rocker_fdb_tbl_entry *match)
{
	struct rocker_fdb_tbl_entry *found;

	hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
			return found;

	return NULL;
}

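/* Add, refresh, or remove an FDB table entry keyed on
 * (port, MAC, vlan).  The key is hashed with crc32 for the lookup
 * table; a matching existing entry only has its aging timestamp
 * touched.
 */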
static int rocker_port_fdb(struct rocker_port *rocker_port,
			   enum switchdev_trans trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *fdb;
	struct rocker_fdb_tbl_entry *found;
	bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.rocker_port = rocker_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	found = rocker_fdb_tbl_find(rocker, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			rocker_port_kfree(trans, fdb);
			if (trans != SWITCHDEV_TRANS_PREPARE)
				hash_del(&found->entry);
		}
	} else if (!removing) {
		if (trans != SWITCHDEV_TRANS_PREPARE)
			hash_add(rocker->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		rocker_port_kfree(trans, fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= ROCKER_OP_FLAG_REFRESH;
	}

	return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
}

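/* Flush all learned FDB entries for this port.  Skipped while the
 * port is in LEARNING or FORWARDING STP state, where the entries
 * are still valid.
 */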
static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= ROCKER_OP_FLAG_REMOVE;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		if (!found->learned)
			continue;
		err = rocker_port_fdb_learn(rocker_port, trans, flags,
					    found->key.addr,
					    found->key.vlan_id);
		if (err)
			goto err_out;
		if (trans != SWITCHDEV_TRANS_PREPARE)
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}

static int rocker_port_router_mac(struct rocker_port *rocker_port,
				  enum switchdev_trans trans, int flags,
				  __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 eth_type;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);
	if (err)
		return err;

	eth_type = htons(ETH_P_IPV6);
	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);

	return err;
}

static int rocker_port_fwding(struct rocker_port *rocker_port,
			      enum switchdev_trans trans, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		flags |= ROCKER_OP_FLAG_REMOVE;

	out_pport = rocker_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		pop_vlan = rocker_vlan_id_is_internal(vlan_id);
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}

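/* Move a port to a new STP state: install or remove the ctrl
 * entries that state wants, flush learned FDB entries, and toggle
 * forwarding.  In the PREPARE phase the previous ctrls and state
 * are restored on exit, since the change is only being validated.
 */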
static int rocker_port_stp_update(struct rocker_port *rocker_port,
				  enum switchdev_trans trans, int flags,
				  u8 state)
{
	bool want[ROCKER_CTRL_MAX] = { 0, };
	bool prev_ctrls[ROCKER_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	if (trans == SWITCHDEV_TRANS_PREPARE) {
		memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
		prev_state = rocker_port->stp_state;
	}

	if (rocker_port->stp_state == state)
		return 0;

	rocker_port->stp_state = state;

	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		want[ROCKER_CTRL_IPV4_MCAST] = true;
		want[ROCKER_CTRL_IPV6_MCAST] = true;
		if (rocker_port_is_bridged(rocker_port))
			want[ROCKER_CTRL_DFLT_BRIDGING] = true;
		else if (rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_DFLT_OVS] = true;
		else
			want[ROCKER_CTRL_LOCAL_ARP] = true;
		break;
	}

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (want[i] != rocker_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
			err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
					       &rocker_ctrls[i]);
			if (err)
				goto err_out;
			rocker_port->ctrls[i] = want[i];
		}
	}

	err = rocker_port_fdb_flush(rocker_port, trans, flags);
	if (err)
		goto err_out;

	err = rocker_port_fwding(rocker_port, trans, flags);

err_out:
	if (trans == SWITCHDEV_TRANS_PREPARE) {
		memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		rocker_port->stp_state = prev_state;
	}

	return err;
}

static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
				  enum switchdev_trans trans, int flags)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will enable port */
		return 0;

	/* port is not bridged, so simulate going to FORWARDING state */
	return rocker_port_stp_update(rocker_port, trans, flags,
				      BR_STATE_FORWARDING);
}

static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
				   enum switchdev_trans trans, int flags)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will disable port */
		return 0;

	/* port is not bridged, so simulate going to DISABLED state */
	return rocker_port_stp_update(rocker_port, trans, flags,
				      BR_STATE_DISABLED);
}

static struct rocker_internal_vlan_tbl_entry *
rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
{
	struct rocker_internal_vlan_tbl_entry *found;

	hash_for_each_possible(rocker->internal_vlan_tbl, found,
			       entry, ifindex) {
		if (found->ifindex == ifindex)
			return found;
	}

	return NULL;
}

static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
					       int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *entry;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);

	for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}

static void
rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
				 int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (!found) {
		netdev_err(rocker_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, rocker->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}

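/* Install or remove a unicast IPv4 route.  Routes whose nexthop
 * gateway is reachable through this port get an L3 unicast group;
 * everything else is trapped to the CPU via the L2 interface group
 * for the internal VLAN.  ECMP is not yet supported, so only the
 * first nexthop (fi->fib_nh) is used.
 */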
static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
				enum switchdev_trans trans, __be32 dst,
				int dst_len, const struct fib_info *fi,
				u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == rocker_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = rocker_port_ipv4_nh(rocker_port, trans, flags,
					  nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
					     dst_mask, priority, goto_tbl,
					     group_id, flags);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}

/*****************
 * Net device ops
 *****************/

static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
	if (err)
		goto err_fwd_enable;

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	if (!dev->proto_down)
		rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_fwd_enable:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}

static int rocker_port_stop(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	netif_stop_queue(dev);
	rocker_port_set_enable(rocker_port, false);
	napi_disable(&rocker_port->napi_rx);
	napi_disable(&rocker_port->napi_tx);
	rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE,
				ROCKER_OP_FLAG_NOWAIT);
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
	rocker_port_dma_rings_fini(rocker_port);

	return 0;
}

Simon Hormane5054642015-05-25 14:28:36 +09004073static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4074 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004075{
Simon Hormane5054642015-05-25 14:28:36 +09004076 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004077 struct pci_dev *pdev = rocker->pdev;
Simon Hormane5054642015-05-25 14:28:36 +09004078 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004079 struct rocker_tlv *attr;
4080 int rem;
4081
4082 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4083 if (!attrs[ROCKER_TLV_TX_FRAGS])
4084 return;
4085 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
Simon Hormane5054642015-05-25 14:28:36 +09004086 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004087 dma_addr_t dma_handle;
4088 size_t len;
4089
4090 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4091 continue;
4092 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
4093 attr);
4094 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4095 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4096 continue;
4097 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4098 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4099 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
4100 }
4101}
4102
Simon Hormane5054642015-05-25 14:28:36 +09004103static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004104 struct rocker_desc_info *desc_info,
4105 char *buf, size_t buf_len)
4106{
Simon Hormane5054642015-05-25 14:28:36 +09004107 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004108 struct pci_dev *pdev = rocker->pdev;
4109 dma_addr_t dma_handle;
4110 struct rocker_tlv *frag;
4111
4112 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
4113 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
4114 if (net_ratelimit())
4115 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
4116 return -EIO;
4117 }
4118 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
4119 if (!frag)
4120 goto unmap_frag;
4121 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
4122 dma_handle))
4123 goto nest_cancel;
4124 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
4125 buf_len))
4126 goto nest_cancel;
4127 rocker_tlv_nest_end(desc_info, frag);
4128 return 0;
4129
4130nest_cancel:
4131 rocker_tlv_nest_cancel(desc_info, frag);
4132unmap_frag:
4133 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
4134 return -EMSGSIZE;
4135}
4136
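/* Transmit path: claim the next free TX descriptor, attach the skb as the
 * descriptor cookie and describe the head plus page fragments as a nested
 * ROCKER_TLV_TX_FRAG list, linearizing the skb first if it carries more
 * than ROCKER_TX_FRAGS_MAX fragments.  The descriptor is then posted to
 * the device and the queue is stopped once no free descriptor remains.
 */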
4137static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
4138{
4139 struct rocker_port *rocker_port = netdev_priv(dev);
4140 struct rocker *rocker = rocker_port->rocker;
4141 struct rocker_desc_info *desc_info;
4142 struct rocker_tlv *frags;
4143 int i;
4144 int err;
4145
4146 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4147 if (unlikely(!desc_info)) {
4148 if (net_ratelimit())
4149 netdev_err(dev, "tx ring full when queue awake\n");
4150 return NETDEV_TX_BUSY;
4151 }
4152
4153 rocker_desc_cookie_ptr_set(desc_info, skb);
4154
4155 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
4156 if (!frags)
4157 goto out;
4158 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4159 skb->data, skb_headlen(skb));
4160 if (err)
4161 goto nest_cancel;
Jiri Pirko95b9be62015-08-02 20:56:38 +02004162 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
4163 err = skb_linearize(skb);
4164 if (err)
4165 goto unmap_frags;
4166 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004167
4168 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4169 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4170
4171 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4172 skb_frag_address(frag),
4173 skb_frag_size(frag));
4174 if (err)
4175 goto unmap_frags;
4176 }
4177 rocker_tlv_nest_end(desc_info, frags);
4178
4179 rocker_desc_gen_clear(desc_info);
4180 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
4181
4182 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4183 if (!desc_info)
4184 netif_stop_queue(dev);
4185
4186 return NETDEV_TX_OK;
4187
4188unmap_frags:
4189 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4190nest_cancel:
4191 rocker_tlv_nest_cancel(desc_info, frags);
4192out:
4193 dev_kfree_skb(skb);
David Ahernf2bbca52015-01-16 14:22:29 -07004194 dev->stats.tx_dropped++;
4195
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004196 return NETDEV_TX_OK;
4197}
4198
4199static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4200{
4201 struct sockaddr *addr = p;
4202 struct rocker_port *rocker_port = netdev_priv(dev);
4203 int err;
4204
4205 if (!is_valid_ether_addr(addr->sa_data))
4206 return -EADDRNOTAVAIL;
4207
4208 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4209 if (err)
4210 return err;
4211 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4212 return 0;
4213}
4214
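/* MTU changes are pushed to the device with a port-settings command; the
 * port is stopped around the change if it was running and reopened after.
 */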
Scott Feldman77a58c72015-07-08 16:06:47 -07004215static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4216{
4217 struct rocker_port *rocker_port = netdev_priv(dev);
4218 int running = netif_running(dev);
4219 int err;
4220
4221#define ROCKER_PORT_MIN_MTU 68
4222#define ROCKER_PORT_MAX_MTU 9000
4223
4224 if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4225 return -EINVAL;
4226
4227 if (running)
4228 rocker_port_stop(dev);
4229
4230 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4231 dev->mtu = new_mtu;
4232
4233 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4234 if (err)
4235 return err;
4236
4237 if (running)
4238 err = rocker_port_open(dev);
4239
4240 return err;
4241}
4242
David Aherndb191702015-03-17 20:23:16 -06004243static int rocker_port_get_phys_port_name(struct net_device *dev,
4244 char *buf, size_t len)
4245{
4246 struct rocker_port *rocker_port = netdev_priv(dev);
4247 struct port_name name = { .buf = buf, .len = len };
4248 int err;
4249
Scott Feldman179f9a22015-06-12 21:35:46 -07004250 err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
David Aherndb191702015-03-17 20:23:16 -06004251 rocker_cmd_get_port_settings_prep, NULL,
4252 rocker_cmd_get_port_settings_phys_name_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07004253 &name);
David Aherndb191702015-03-17 20:23:16 -06004254
4255 return err ? -EOPNOTSUPP : 0;
4256}
4257
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004258static int rocker_port_change_proto_down(struct net_device *dev,
4259 bool proto_down)
4260{
4261 struct rocker_port *rocker_port = netdev_priv(dev);
4262
4263 if (rocker_port->dev->flags & IFF_UP)
4264 rocker_port_set_enable(rocker_port, !proto_down);
4265 rocker_port->dev->proto_down = proto_down;
4266 return 0;
4267}
4268
Scott Feldmandd19f832015-08-12 18:45:25 -07004269static void rocker_port_neigh_destroy(struct neighbour *n)
4270{
4271 struct rocker_port *rocker_port = netdev_priv(n->dev);
4272 int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4273 __be32 ip_addr = *(__be32 *)n->primary_key;
4274
4275 rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
4276 flags, ip_addr, n->ha);
4277}
4278
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004279static const struct net_device_ops rocker_port_netdev_ops = {
4280 .ndo_open = rocker_port_open,
4281 .ndo_stop = rocker_port_stop,
4282 .ndo_start_xmit = rocker_port_xmit,
4283 .ndo_set_mac_address = rocker_port_set_mac_address,
Scott Feldman77a58c72015-07-08 16:06:47 -07004284 .ndo_change_mtu = rocker_port_change_mtu,
Scott Feldman85fdb952015-05-10 09:48:05 -07004285 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
Scott Feldmanfc8f40d2015-05-10 09:47:57 -07004286 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
Scott Feldman54ba5a02015-05-10 09:48:01 -07004287 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004288 .ndo_fdb_add = switchdev_port_fdb_add,
4289 .ndo_fdb_del = switchdev_port_fdb_del,
4290 .ndo_fdb_dump = switchdev_port_fdb_dump,
David Aherndb191702015-03-17 20:23:16 -06004291 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004292 .ndo_change_proto_down = rocker_port_change_proto_down,
Scott Feldmandd19f832015-08-12 18:45:25 -07004293 .ndo_neigh_destroy = rocker_port_neigh_destroy,
Scott Feldman98237d42015-03-15 21:07:15 -07004294};
4295
4296/********************
4297 * switchdev interface
4298 ********************/
4299
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004300static int rocker_port_attr_get(struct net_device *dev,
4301 struct switchdev_attr *attr)
Scott Feldman98237d42015-03-15 21:07:15 -07004302{
Simon Hormane5054642015-05-25 14:28:36 +09004303 const struct rocker_port *rocker_port = netdev_priv(dev);
4304 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman98237d42015-03-15 21:07:15 -07004305
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004306 switch (attr->id) {
4307 case SWITCHDEV_ATTR_PORT_PARENT_ID:
Scott Feldman42275bd2015-05-13 11:16:50 -07004308 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4309 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004310 break;
Scott Feldman6004c862015-05-10 09:47:55 -07004311 case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
Scott Feldman42275bd2015-05-13 11:16:50 -07004312 attr->u.brport_flags = rocker_port->brport_flags;
Scott Feldman6004c862015-05-10 09:47:55 -07004313 break;
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004314 default:
4315 return -EOPNOTSUPP;
4316 }
4317
Scott Feldman98237d42015-03-15 21:07:15 -07004318 return 0;
4319}
4320
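/* switchdev attr/obj operations arrive as two-phase transactions: memory
 * allocated during SWITCHDEV_TRANS_PREPARE is queued on the port's
 * trans_mem list so that a later ABORT can release it here without
 * touching the hardware.
 */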
Simon Hormane5054642015-05-25 14:28:36 +09004321static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
Scott Feldmanc4f20322015-05-10 09:47:50 -07004322{
4323 struct list_head *mem, *tmp;
4324
4325 list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
4326 list_del(mem);
4327 kfree(mem);
4328 }
4329}
4330
Scott Feldman6004c862015-05-10 09:47:55 -07004331static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
4332 enum switchdev_trans trans,
4333 unsigned long brport_flags)
4334{
4335 unsigned long orig_flags;
4336 int err = 0;
4337
4338 orig_flags = rocker_port->brport_flags;
4339 rocker_port->brport_flags = brport_flags;
4340 if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
4341 err = rocker_port_set_learning(rocker_port, trans);
4342
4343 if (trans == SWITCHDEV_TRANS_PREPARE)
4344 rocker_port->brport_flags = orig_flags;
4345
4346 return err;
4347}
4348
Scott Feldmanc4f20322015-05-10 09:47:50 -07004349static int rocker_port_attr_set(struct net_device *dev,
4350 struct switchdev_attr *attr)
4351{
4352 struct rocker_port *rocker_port = netdev_priv(dev);
4353 int err = 0;
4354
4355 switch (attr->trans) {
4356 case SWITCHDEV_TRANS_PREPARE:
4357 BUG_ON(!list_empty(&rocker_port->trans_mem));
4358 break;
4359 case SWITCHDEV_TRANS_ABORT:
4360 rocker_port_trans_abort(rocker_port);
4361 return 0;
4362 default:
4363 break;
4364 }
4365
4366 switch (attr->id) {
Scott Feldman35636062015-05-10 09:47:51 -07004367 case SWITCHDEV_ATTR_PORT_STP_STATE:
Scott Feldmanac283932015-06-12 21:35:48 -07004368 err = rocker_port_stp_update(rocker_port, attr->trans,
4369 ROCKER_OP_FLAG_NOWAIT,
Scott Feldman42275bd2015-05-13 11:16:50 -07004370 attr->u.stp_state);
Scott Feldman35636062015-05-10 09:47:51 -07004371 break;
Scott Feldman6004c862015-05-10 09:47:55 -07004372 case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
4373 err = rocker_port_brport_flags_set(rocker_port, attr->trans,
Scott Feldman42275bd2015-05-13 11:16:50 -07004374 attr->u.brport_flags);
Scott Feldman6004c862015-05-10 09:47:55 -07004375 break;
Scott Feldmanc4f20322015-05-10 09:47:50 -07004376 default:
4377 err = -EOPNOTSUPP;
4378 break;
4379 }
4380
4381 return err;
Scott Feldman98237d42015-03-15 21:07:15 -07004382}
4383
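/* Adding a VLAN to a port installs the VLAN table entry and the matching
 * router MAC entry; if the latter fails, the VLAN entry is rolled back.
 */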
Scott Feldman9228ad22015-05-10 09:47:54 -07004384static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4385 enum switchdev_trans trans, u16 vid, u16 flags)
4386{
4387 int err;
4388
4389 /* XXX deal with flags for PVID and untagged */
4390
4391 err = rocker_port_vlan(rocker_port, trans, 0, vid);
4392 if (err)
4393 return err;
4394
Scott Feldmancec04a62015-06-01 11:39:03 -07004395 err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
4396 if (err)
4397 rocker_port_vlan(rocker_port, trans,
4398 ROCKER_OP_FLAG_REMOVE, vid);
4399
4400 return err;
Scott Feldman9228ad22015-05-10 09:47:54 -07004401}
4402
4403static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4404 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09004405 const struct switchdev_obj_vlan *vlan)
Scott Feldman9228ad22015-05-10 09:47:54 -07004406{
4407 u16 vid;
4408 int err;
4409
Scott Feldman3e3a78b2015-06-22 00:27:16 -07004410 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
Scott Feldman9228ad22015-05-10 09:47:54 -07004411 err = rocker_port_vlan_add(rocker_port, trans,
4412 vid, vlan->flags);
4413 if (err)
4414 return err;
4415 }
4416
4417 return 0;
4418}
4419
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004420static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4421 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09004422 const struct switchdev_obj_fdb *fdb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004423{
4424 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4425 int flags = 0;
4426
4427 if (!rocker_port_is_bridged(rocker_port))
4428 return -EINVAL;
4429
4430 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4431}
4432
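/* Entry point for switchdev object adds (port VLANs, IPv4 FIB entries and
 * FDB entries), using the same PREPARE/ABORT transaction handling as
 * attr_set.  These typically originate from userspace requests such as
 * "bridge vlan add" or "bridge fdb add ... master", or from the kernel
 * FIB code for IPv4 routes.
 */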
Scott Feldman9228ad22015-05-10 09:47:54 -07004433static int rocker_port_obj_add(struct net_device *dev,
4434 struct switchdev_obj *obj)
4435{
4436 struct rocker_port *rocker_port = netdev_priv(dev);
Simon Hormane5054642015-05-25 14:28:36 +09004437 const struct switchdev_obj_ipv4_fib *fib4;
Scott Feldman9228ad22015-05-10 09:47:54 -07004438 int err = 0;
4439
4440 switch (obj->trans) {
4441 case SWITCHDEV_TRANS_PREPARE:
4442 BUG_ON(!list_empty(&rocker_port->trans_mem));
4443 break;
4444 case SWITCHDEV_TRANS_ABORT:
4445 rocker_port_trans_abort(rocker_port);
4446 return 0;
4447 default:
4448 break;
4449 }
4450
4451 switch (obj->id) {
4452 case SWITCHDEV_OBJ_PORT_VLAN:
4453 err = rocker_port_vlans_add(rocker_port, obj->trans,
Scott Feldman42275bd2015-05-13 11:16:50 -07004454 &obj->u.vlan);
Scott Feldman9228ad22015-05-10 09:47:54 -07004455 break;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004456 case SWITCHDEV_OBJ_IPV4_FIB:
Scott Feldman42275bd2015-05-13 11:16:50 -07004457 fib4 = &obj->u.ipv4_fib;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004458 err = rocker_port_fib_ipv4(rocker_port, obj->trans,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004459 htonl(fib4->dst), fib4->dst_len,
Scott Feldman58c2cb12015-05-10 09:48:06 -07004460 fib4->fi, fib4->tb_id, 0);
4461 break;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004462 case SWITCHDEV_OBJ_PORT_FDB:
4463 err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
4464 break;
Scott Feldman9228ad22015-05-10 09:47:54 -07004465 default:
4466 err = -EOPNOTSUPP;
4467 break;
4468 }
4469
4470 return err;
4471}
4472
4473static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4474 u16 vid, u16 flags)
4475{
4476 int err;
4477
4478 err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
4479 ROCKER_OP_FLAG_REMOVE, htons(vid));
4480 if (err)
4481 return err;
4482
4483 return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
4484 ROCKER_OP_FLAG_REMOVE, vid);
4485}
4486
4487static int rocker_port_vlans_del(struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09004488 const struct switchdev_obj_vlan *vlan)
Scott Feldman9228ad22015-05-10 09:47:54 -07004489{
4490 u16 vid;
4491 int err;
4492
Scott Feldman3e3a78b2015-06-22 00:27:16 -07004493 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
Scott Feldman9228ad22015-05-10 09:47:54 -07004494 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4495 if (err)
4496 return err;
4497 }
4498
4499 return 0;
4500}
4501
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004502static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4503 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09004504 const struct switchdev_obj_fdb *fdb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004505{
4506 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
Scott Feldmanb4ad7ba2015-06-14 11:33:11 -07004507 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004508
4509 if (!rocker_port_is_bridged(rocker_port))
4510 return -EINVAL;
4511
4512 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4513}
4514
Scott Feldman9228ad22015-05-10 09:47:54 -07004515static int rocker_port_obj_del(struct net_device *dev,
4516 struct switchdev_obj *obj)
4517{
4518 struct rocker_port *rocker_port = netdev_priv(dev);
Simon Hormane5054642015-05-25 14:28:36 +09004519 const struct switchdev_obj_ipv4_fib *fib4;
Scott Feldman9228ad22015-05-10 09:47:54 -07004520 int err = 0;
4521
4522 switch (obj->id) {
4523 case SWITCHDEV_OBJ_PORT_VLAN:
Scott Feldman42275bd2015-05-13 11:16:50 -07004524 err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
Scott Feldman9228ad22015-05-10 09:47:54 -07004525 break;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004526 case SWITCHDEV_OBJ_IPV4_FIB:
Scott Feldman42275bd2015-05-13 11:16:50 -07004527 fib4 = &obj->u.ipv4_fib;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004528 err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004529 htonl(fib4->dst), fib4->dst_len,
4530 fib4->fi, fib4->tb_id,
4531 ROCKER_OP_FLAG_REMOVE);
Scott Feldman58c2cb12015-05-10 09:48:06 -07004532 break;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004533 case SWITCHDEV_OBJ_PORT_FDB:
4534 err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
4535 break;
4536 default:
4537 err = -EOPNOTSUPP;
4538 break;
4539 }
4540
4541 return err;
4542}
4543
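/* Dump the software FDB shadow table: walk rocker->fdb_tbl under
 * fdb_tbl_lock and report every entry learned on this port through the
 * switchdev object callback.
 */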
Simon Hormane5054642015-05-25 14:28:36 +09004544static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004545 struct switchdev_obj *obj)
4546{
4547 struct rocker *rocker = rocker_port->rocker;
4548 struct switchdev_obj_fdb *fdb = &obj->u.fdb;
4549 struct rocker_fdb_tbl_entry *found;
4550 struct hlist_node *tmp;
4551 unsigned long lock_flags;
4552 int bkt;
4553 int err = 0;
4554
4555 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4556 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4c660492015-09-23 08:39:15 -07004557 if (found->key.rocker_port != rocker_port)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004558 continue;
David S. Millercdf09692015-08-11 12:00:37 -07004559 fdb->addr = found->key.addr;
Vivien Didelotce80e7b2015-08-10 09:09:52 -04004560 fdb->ndm_state = NUD_REACHABLE;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004561 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4562 found->key.vlan_id);
4563 err = obj->cb(rocker_port->dev, obj);
4564 if (err)
4565 break;
4566 }
4567 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4568
4569 return err;
4570}
4571
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004572static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4573 struct switchdev_obj *obj)
4574{
4575 struct switchdev_obj_vlan *vlan = &obj->u.vlan;
4576 u16 vid;
4577 int err = 0;
4578
4579 for (vid = 1; vid < VLAN_N_VID; vid++) {
4580 if (!test_bit(vid, rocker_port->vlan_bitmap))
4581 continue;
4582 vlan->flags = 0;
4583 if (rocker_vlan_id_is_internal(htons(vid)))
4584 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4585 vlan->vid_begin = vlan->vid_end = vid;
4586 err = obj->cb(rocker_port->dev, obj);
4587 if (err)
4588 break;
4589 }
4590
4591 return err;
4592}
4593
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004594static int rocker_port_obj_dump(struct net_device *dev,
4595 struct switchdev_obj *obj)
4596{
Simon Hormane5054642015-05-25 14:28:36 +09004597 const struct rocker_port *rocker_port = netdev_priv(dev);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004598 int err = 0;
4599
4600 switch (obj->id) {
4601 case SWITCHDEV_OBJ_PORT_FDB:
4602 err = rocker_port_fdb_dump(rocker_port, obj);
4603 break;
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004604 case SWITCHDEV_OBJ_PORT_VLAN:
4605 err = rocker_port_vlan_dump(rocker_port, obj);
4606 break;
Scott Feldman9228ad22015-05-10 09:47:54 -07004607 default:
4608 err = -EOPNOTSUPP;
4609 break;
4610 }
4611
4612 return err;
4613}
4614
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07004615static const struct switchdev_ops rocker_port_switchdev_ops = {
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004616 .switchdev_port_attr_get = rocker_port_attr_get,
Scott Feldman35636062015-05-10 09:47:51 -07004617 .switchdev_port_attr_set = rocker_port_attr_set,
Scott Feldman9228ad22015-05-10 09:47:54 -07004618 .switchdev_port_obj_add = rocker_port_obj_add,
4619 .switchdev_port_obj_del = rocker_port_obj_del,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004620 .switchdev_port_obj_dump = rocker_port_obj_dump,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004621};
4622
4623/********************
4624 * ethtool interface
4625 ********************/
4626
4627static int rocker_port_get_settings(struct net_device *dev,
4628 struct ethtool_cmd *ecmd)
4629{
4630 struct rocker_port *rocker_port = netdev_priv(dev);
4631
4632 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4633}
4634
4635static int rocker_port_set_settings(struct net_device *dev,
4636 struct ethtool_cmd *ecmd)
4637{
4638 struct rocker_port *rocker_port = netdev_priv(dev);
4639
4640 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4641}
4642
4643static void rocker_port_get_drvinfo(struct net_device *dev,
4644 struct ethtool_drvinfo *drvinfo)
4645{
4646 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4647 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4648}
4649
David Ahern9766e972015-01-29 20:59:33 -07004650static struct rocker_port_stats {
4651 char str[ETH_GSTRING_LEN];
4652 int type;
4653} rocker_port_stats[] = {
4654 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4655 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4656 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4657 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4658
4659 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4660 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4661 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4662 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
4663};
4664
4665#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
4666
4667static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4668 u8 *data)
4669{
4670 u8 *p = data;
4671 int i;
4672
4673 switch (stringset) {
4674 case ETH_SS_STATS:
4675 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4676 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4677 p += ETH_GSTRING_LEN;
4678 }
4679 break;
4680 }
4681}
4682
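/* Port statistics are fetched with a GET_PORT_STATS command: _prep builds
 * the command TLVs for this pport and _ethtool_proc copies the returned
 * counters into the ethtool data array in rocker_port_stats[] order.
 */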
4683static int
Simon Horman534ba6a2015-06-01 13:25:04 +09004684rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
David Ahern9766e972015-01-29 20:59:33 -07004685 struct rocker_desc_info *desc_info,
4686 void *priv)
4687{
4688 struct rocker_tlv *cmd_stats;
4689
4690 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4691 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4692 return -EMSGSIZE;
4693
4694 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4695 if (!cmd_stats)
4696 return -EMSGSIZE;
4697
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004698 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4699 rocker_port->pport))
David Ahern9766e972015-01-29 20:59:33 -07004700 return -EMSGSIZE;
4701
4702 rocker_tlv_nest_end(desc_info, cmd_stats);
4703
4704 return 0;
4705}
4706
4707static int
Simon Horman534ba6a2015-06-01 13:25:04 +09004708rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09004709 const struct rocker_desc_info *desc_info,
David Ahern9766e972015-01-29 20:59:33 -07004710 void *priv)
4711{
Simon Hormane5054642015-05-25 14:28:36 +09004712 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4713 const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4714 const struct rocker_tlv *pattr;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004715 u32 pport;
David Ahern9766e972015-01-29 20:59:33 -07004716 u64 *data = priv;
4717 int i;
4718
4719 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4720
4721 if (!attrs[ROCKER_TLV_CMD_INFO])
4722 return -EIO;
4723
4724 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4725 attrs[ROCKER_TLV_CMD_INFO]);
4726
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004727 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
David Ahern9766e972015-01-29 20:59:33 -07004728 return -EIO;
4729
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004730 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4731 if (pport != rocker_port->pport)
David Ahern9766e972015-01-29 20:59:33 -07004732 return -EIO;
4733
4734 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4735 pattr = stats_attrs[rocker_port_stats[i].type];
4736 if (!pattr)
4737 continue;
4738
4739 data[i] = rocker_tlv_get_u64(pattr);
4740 }
4741
4742 return 0;
4743}
4744
4745static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4746 void *priv)
4747{
Scott Feldman179f9a22015-06-12 21:35:46 -07004748 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
David Ahern9766e972015-01-29 20:59:33 -07004749 rocker_cmd_get_port_stats_prep, NULL,
4750 rocker_cmd_get_port_stats_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07004751 priv);
David Ahern9766e972015-01-29 20:59:33 -07004752}
4753
4754static void rocker_port_get_stats(struct net_device *dev,
4755 struct ethtool_stats *stats, u64 *data)
4756{
4757 struct rocker_port *rocker_port = netdev_priv(dev);
4758
4759 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4760 int i;
4761
4762 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4763 data[i] = 0;
4764 }
David Ahern9766e972015-01-29 20:59:33 -07004765}
4766
4767static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4768{
4769 switch (sset) {
4770 case ETH_SS_STATS:
4771 return ROCKER_PORT_STATS_LEN;
4772 default:
4773 return -EOPNOTSUPP;
4774 }
4775}
4776
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004777static const struct ethtool_ops rocker_port_ethtool_ops = {
4778 .get_settings = rocker_port_get_settings,
4779 .set_settings = rocker_port_set_settings,
4780 .get_drvinfo = rocker_port_get_drvinfo,
4781 .get_link = ethtool_op_get_link,
David Ahern9766e972015-01-29 20:59:33 -07004782 .get_strings = rocker_port_get_strings,
4783 .get_ethtool_stats = rocker_port_get_stats,
4784 .get_sset_count = rocker_port_get_sset_count,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004785};
4786
4787/*****************
4788 * NAPI interface
4789 *****************/
4790
4791static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4792{
4793 return container_of(napi, struct rocker_port, napi_tx);
4794}
4795
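/* TX completion NAPI poll: reclaim finished TX descriptors, unmap their
 * fragments, update stats, free the skbs and return the credits to the
 * ring, waking the queue if it had been stopped.
 */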
4796static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4797{
4798 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
Simon Hormane5054642015-05-25 14:28:36 +09004799 const struct rocker *rocker = rocker_port->rocker;
4800 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004801 u32 credits = 0;
4802 int err;
4803
4804 /* Cleanup tx descriptors */
4805 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
David Ahernf2bbca52015-01-16 14:22:29 -07004806 struct sk_buff *skb;
4807
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004808 err = rocker_desc_err(desc_info);
4809 if (err && net_ratelimit())
4810 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4811 err);
4812 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
David Ahernf2bbca52015-01-16 14:22:29 -07004813
4814 skb = rocker_desc_cookie_ptr_get(desc_info);
4815 if (err == 0) {
4816 rocker_port->dev->stats.tx_packets++;
4817 rocker_port->dev->stats.tx_bytes += skb->len;
Scott Feldman4725ceb2015-05-10 09:48:08 -07004818 } else {
David Ahernf2bbca52015-01-16 14:22:29 -07004819 rocker_port->dev->stats.tx_errors++;
Scott Feldman4725ceb2015-05-10 09:48:08 -07004820 }
David Ahernf2bbca52015-01-16 14:22:29 -07004821
4822 dev_kfree_skb_any(skb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004823 credits++;
4824 }
4825
4826 if (credits && netif_queue_stopped(rocker_port->dev))
4827 netif_wake_queue(rocker_port->dev);
4828
4829 napi_complete(napi);
4830 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4831
4832 return 0;
4833}
4834
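/* Process one received frame: unmap the skb posted in the RX descriptor,
 * set its length and protocol, propagate the forward-offload mark when
 * the device already forwarded the frame, hand the skb to the stack and
 * refill the descriptor with a fresh skb.
 */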
Simon Hormane5054642015-05-25 14:28:36 +09004835static int rocker_port_rx_proc(const struct rocker *rocker,
4836 const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004837 struct rocker_desc_info *desc_info)
4838{
Simon Hormane5054642015-05-25 14:28:36 +09004839 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004840 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4841 size_t rx_len;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07004842 u16 rx_flags = 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004843
4844 if (!skb)
4845 return -ENOENT;
4846
4847 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4848 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4849 return -EINVAL;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07004850 if (attrs[ROCKER_TLV_RX_FLAGS])
4851 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004852
4853 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4854
4855 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4856 skb_put(skb, rx_len);
4857 skb->protocol = eth_type_trans(skb, rocker_port->dev);
David Ahernf2bbca52015-01-16 14:22:29 -07004858
Scott Feldman3f98a8e2015-07-18 18:24:51 -07004859 if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
4860 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
4861
David Ahernf2bbca52015-01-16 14:22:29 -07004862 rocker_port->dev->stats.rx_packets++;
4863 rocker_port->dev->stats.rx_bytes += skb->len;
4864
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004865 netif_receive_skb(skb);
4866
Simon Horman534ba6a2015-06-01 13:25:04 +09004867 return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004868}
4869
4870static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4871{
4872 return container_of(napi, struct rocker_port, napi_rx);
4873}
4874
4875static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4876{
4877 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
Simon Hormane5054642015-05-25 14:28:36 +09004878 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004879 struct rocker_desc_info *desc_info;
4880 u32 credits = 0;
4881 int err;
4882
4883 /* Process rx descriptors */
4884 while (credits < budget &&
4885 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4886 err = rocker_desc_err(desc_info);
4887 if (err) {
4888 if (net_ratelimit())
4889 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4890 err);
4891 } else {
4892 err = rocker_port_rx_proc(rocker, rocker_port,
4893 desc_info);
4894 if (err && net_ratelimit())
4895 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4896 err);
4897 }
David Ahernf2bbca52015-01-16 14:22:29 -07004898 if (err)
4899 rocker_port->dev->stats.rx_errors++;
4900
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004901 rocker_desc_gen_clear(desc_info);
4902 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4903 credits++;
4904 }
4905
4906 if (credits < budget)
4907 napi_complete(napi);
4908
4909 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
4910
4911 return credits;
4912}
4913
4914/*****************
4915 * PCI driver ops
4916 *****************/
4917
Simon Hormane5054642015-05-25 14:28:36 +09004918static void rocker_carrier_init(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004919{
Simon Hormane5054642015-05-25 14:28:36 +09004920 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004921 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4922 bool link_up;
4923
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004924 link_up = link_status & (1 << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004925 if (link_up)
4926 netif_carrier_on(rocker_port->dev);
4927 else
4928 netif_carrier_off(rocker_port->dev);
4929}
4930
Simon Hormane5054642015-05-25 14:28:36 +09004931static void rocker_remove_ports(const struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004932{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004933 struct rocker_port *rocker_port;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004934 int i;
4935
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004936 for (i = 0; i < rocker->port_count; i++) {
4937 rocker_port = rocker->ports[i];
Scott Feldmana0720312015-06-12 21:09:44 -07004938 if (!rocker_port)
4939 continue;
Scott Feldmanc4f20322015-05-10 09:47:50 -07004940 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
4941 ROCKER_OP_FLAG_REMOVE);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004942 unregister_netdev(rocker_port->dev);
Ido Schimmel1ebd47e2015-08-02 19:29:16 +02004943 free_netdev(rocker_port->dev);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004944 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004945 kfree(rocker->ports);
4946}
4947
Simon Horman534ba6a2015-06-01 13:25:04 +09004948static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004949{
Simon Horman534ba6a2015-06-01 13:25:04 +09004950 const struct rocker *rocker = rocker_port->rocker;
Simon Hormane5054642015-05-25 14:28:36 +09004951 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004952 int err;
4953
4954 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4955 rocker_port->dev->dev_addr);
4956 if (err) {
4957 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4958 eth_hw_addr_random(rocker_port->dev);
4959 }
4960}
4961
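/* Allocate and register one netdev per physical port (pport is 1-based),
 * wire up the netdev/ethtool/switchdev ops, seed the MAC address from the
 * device and install the ingress port table entry plus the untagged VLAN.
 */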
4962static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4963{
Simon Hormane5054642015-05-25 14:28:36 +09004964 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004965 struct rocker_port *rocker_port;
4966 struct net_device *dev;
Scott Feldmanbcfd7802015-06-01 11:39:04 -07004967 u16 untagged_vid = 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004968 int err;
4969
4970 dev = alloc_etherdev(sizeof(struct rocker_port));
4971 if (!dev)
4972 return -ENOMEM;
4973 rocker_port = netdev_priv(dev);
4974 rocker_port->dev = dev;
4975 rocker_port->rocker = rocker;
4976 rocker_port->port_number = port_number;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004977 rocker_port->pport = port_number + 1;
Scott Feldman5111f802014-11-28 14:34:30 +01004978 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
Scott Feldmane7335702015-09-23 08:39:17 -07004979 rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
Scott Feldmanc4f20322015-05-10 09:47:50 -07004980 INIT_LIST_HEAD(&rocker_port->trans_mem);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004981
Simon Horman534ba6a2015-06-01 13:25:04 +09004982 rocker_port_dev_addr_init(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004983 dev->netdev_ops = &rocker_port_netdev_ops;
4984 dev->ethtool_ops = &rocker_port_ethtool_ops;
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07004985 dev->switchdev_ops = &rocker_port_switchdev_ops;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004986 netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
4987 NAPI_POLL_WEIGHT);
4988 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
4989 NAPI_POLL_WEIGHT);
4990 rocker_carrier_init(rocker_port);
4991
Ido Schimmel21518a62015-08-02 20:56:37 +02004992 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004993
4994 err = register_netdev(dev);
4995 if (err) {
4996 dev_err(&pdev->dev, "register_netdev failed\n");
4997 goto err_register_netdev;
4998 }
4999 rocker->ports[port_number] = rocker_port;
5000
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005001 switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
5002
Scott Feldmanc4f20322015-05-10 09:47:50 -07005003 rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
Scott Feldman5111f802014-11-28 14:34:30 +01005004
Scott Feldmanc4f20322015-05-10 09:47:50 -07005005 err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005006 if (err) {
Scott Feldmanff147022015-08-03 22:31:18 -07005007 netdev_err(rocker_port->dev, "install ig port table failed\n");
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005008 goto err_port_ig_tbl;
5009 }
5010
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005011 rocker_port->internal_vlan_id =
5012 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
5013
5014 err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
5015 untagged_vid, 0);
5016 if (err) {
5017 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
5018 goto err_untagged_vlan;
5019 }
5020
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005021 return 0;
5022
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005023err_untagged_vlan:
5024 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
5025 ROCKER_OP_FLAG_REMOVE);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005026err_port_ig_tbl:
Scott Feldman6c4f7782015-08-03 22:31:17 -07005027 rocker->ports[port_number] = NULL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005028 unregister_netdev(dev);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005029err_register_netdev:
5030 free_netdev(dev);
5031 return err;
5032}
5033
5034static int rocker_probe_ports(struct rocker *rocker)
5035{
5036 int i;
5037 size_t alloc_size;
5038 int err;
5039
5040 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
Scott Feldman27b808c2015-06-01 11:39:02 -07005041 rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
Dan Carpentere65ad3b2015-02-25 16:35:32 +03005042 if (!rocker->ports)
5043 return -ENOMEM;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005044 for (i = 0; i < rocker->port_count; i++) {
5045 err = rocker_probe_port(rocker, i);
5046 if (err)
5047 goto remove_ports;
5048 }
5049 return 0;
5050
5051remove_ports:
5052 rocker_remove_ports(rocker);
5053 return err;
5054}
5055
5056static int rocker_msix_init(struct rocker *rocker)
5057{
5058 struct pci_dev *pdev = rocker->pdev;
5059 int msix_entries;
5060 int i;
5061 int err;
5062
5063 msix_entries = pci_msix_vec_count(pdev);
5064 if (msix_entries < 0)
5065 return msix_entries;
5066
5067 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
5068 return -EINVAL;
5069
5070 rocker->msix_entries = kmalloc_array(msix_entries,
5071 sizeof(struct msix_entry),
5072 GFP_KERNEL);
5073 if (!rocker->msix_entries)
5074 return -ENOMEM;
5075
5076 for (i = 0; i < msix_entries; i++)
5077 rocker->msix_entries[i].entry = i;
5078
5079 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
5080 if (err < 0)
5081 goto err_enable_msix;
5082
5083 return 0;
5084
5085err_enable_msix:
5086 kfree(rocker->msix_entries);
5087 return err;
5088}
5089
Simon Hormane5054642015-05-25 14:28:36 +09005090static void rocker_msix_fini(const struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005091{
5092 pci_disable_msix(rocker->pdev);
5093 kfree(rocker->msix_entries);
5094}
5095
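/* PCI probe: map BAR0, set up MSI-X vectors and DMA rings, run the basic
 * hardware test, request the cmd/event interrupts, initialize the software
 * tables and finally create a netdev for each physical port.
 */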
5096static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5097{
5098 struct rocker *rocker;
5099 int err;
5100
5101 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
5102 if (!rocker)
5103 return -ENOMEM;
5104
5105 err = pci_enable_device(pdev);
5106 if (err) {
5107 dev_err(&pdev->dev, "pci_enable_device failed\n");
5108 goto err_pci_enable_device;
5109 }
5110
5111 err = pci_request_regions(pdev, rocker_driver_name);
5112 if (err) {
5113 dev_err(&pdev->dev, "pci_request_regions failed\n");
5114 goto err_pci_request_regions;
5115 }
5116
5117 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5118 if (!err) {
5119 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5120 if (err) {
5121 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
5122 goto err_pci_set_dma_mask;
5123 }
5124 } else {
5125 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5126 if (err) {
5127 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
5128 goto err_pci_set_dma_mask;
5129 }
5130 }
5131
5132 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
5133 dev_err(&pdev->dev, "invalid PCI region size\n");
Wei Yongjun3122a922015-04-16 20:21:02 +08005134 err = -EINVAL;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005135 goto err_pci_resource_len_check;
5136 }
5137
5138 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
5139 pci_resource_len(pdev, 0));
5140 if (!rocker->hw_addr) {
5141 dev_err(&pdev->dev, "ioremap failed\n");
5142 err = -EIO;
5143 goto err_ioremap;
5144 }
5145 pci_set_master(pdev);
5146
5147 rocker->pdev = pdev;
5148 pci_set_drvdata(pdev, rocker);
5149
5150 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
5151
5152 err = rocker_msix_init(rocker);
5153 if (err) {
5154 dev_err(&pdev->dev, "MSI-X init failed\n");
5155 goto err_msix_init;
5156 }
5157
5158 err = rocker_basic_hw_test(rocker);
5159 if (err) {
5160 dev_err(&pdev->dev, "basic hw test failed\n");
5161 goto err_basic_hw_test;
5162 }
5163
5164 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5165
5166 err = rocker_dma_rings_init(rocker);
5167 if (err)
5168 goto err_dma_rings_init;
5169
5170 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
5171 rocker_cmd_irq_handler, 0,
5172 rocker_driver_name, rocker);
5173 if (err) {
5174 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5175 goto err_request_cmd_irq;
5176 }
5177
5178 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5179 rocker_event_irq_handler, 0,
5180 rocker_driver_name, rocker);
5181 if (err) {
5182 dev_err(&pdev->dev, "cannot assign event irq\n");
5183 goto err_request_event_irq;
5184 }
5185
5186 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5187
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005188 err = rocker_init_tbls(rocker);
5189 if (err) {
5190 dev_err(&pdev->dev, "cannot init rocker tables\n");
5191 goto err_init_tbls;
5192 }
5193
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005194 err = rocker_probe_ports(rocker);
5195 if (err) {
5196 dev_err(&pdev->dev, "failed to probe ports\n");
5197 goto err_probe_ports;
5198 }
5199
Scott Feldmanc8beb5b2015-08-12 18:44:13 -07005200 dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5201 (int)sizeof(rocker->hw.id), &rocker->hw.id);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005202
5203 return 0;
5204
5205err_probe_ports:
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005206 rocker_free_tbls(rocker);
5207err_init_tbls:
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005208 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5209err_request_event_irq:
5210 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5211err_request_cmd_irq:
5212 rocker_dma_rings_fini(rocker);
5213err_dma_rings_init:
5214err_basic_hw_test:
5215 rocker_msix_fini(rocker);
5216err_msix_init:
5217 iounmap(rocker->hw_addr);
5218err_ioremap:
5219err_pci_resource_len_check:
5220err_pci_set_dma_mask:
5221 pci_release_regions(pdev);
5222err_pci_request_regions:
5223 pci_disable_device(pdev);
5224err_pci_enable_device:
5225 kfree(rocker);
5226 return err;
5227}
5228
5229static void rocker_remove(struct pci_dev *pdev)
5230{
5231 struct rocker *rocker = pci_get_drvdata(pdev);
5232
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005233 rocker_free_tbls(rocker);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005234 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5235 rocker_remove_ports(rocker);
5236 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5237 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5238 rocker_dma_rings_fini(rocker);
5239 rocker_msix_fini(rocker);
5240 iounmap(rocker->hw_addr);
5241 pci_release_regions(rocker->pdev);
5242 pci_disable_device(rocker->pdev);
5243 kfree(rocker);
5244}
5245
5246static struct pci_driver rocker_pci_driver = {
5247 .name = rocker_driver_name,
5248 .id_table = rocker_pci_id_table,
5249 .probe = rocker_probe,
5250 .remove = rocker_remove,
5251};
5252
Scott Feldman6c707942014-11-28 14:34:28 +01005253/************************************
5254 * Net device notifier event handler
5255 ************************************/
5256
Simon Hormane5054642015-05-25 14:28:36 +09005257static bool rocker_port_dev_check(const struct net_device *dev)
Scott Feldman6c707942014-11-28 14:34:28 +01005258{
5259 return dev->netdev_ops == &rocker_port_netdev_ops;
5260}
5261
5262static int rocker_port_bridge_join(struct rocker_port *rocker_port,
5263 struct net_device *bridge)
5264{
Scott Feldman027e00d2015-06-01 11:39:05 -07005265 u16 untagged_vid = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01005266 int err;
5267
Scott Feldman027e00d2015-06-01 11:39:05 -07005268 /* Port is joining bridge, so the internal VLAN for the
5269 * port is going to change to the bridge internal VLAN.
5270 * Let's remove untagged VLAN (vid=0) from port and
5271 * re-add once internal VLAN has changed.
5272 */
5273
5274 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5275 if (err)
5276 return err;
5277
Simon Hormandf6a2062015-05-21 12:40:17 +09005278 rocker_port_internal_vlan_id_put(rocker_port,
Scott Feldman6c707942014-11-28 14:34:28 +01005279 rocker_port->dev->ifindex);
Scott Feldman027e00d2015-06-01 11:39:05 -07005280 rocker_port->internal_vlan_id =
5281 rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
Scott Feldman6c707942014-11-28 14:34:28 +01005282
5283 rocker_port->bridge_dev = bridge;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005284 switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
Scott Feldman6c707942014-11-28 14:34:28 +01005285
Scott Feldman027e00d2015-06-01 11:39:05 -07005286 return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
5287 untagged_vid, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01005288}
5289
5290static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
5291{
Scott Feldman027e00d2015-06-01 11:39:05 -07005292 u16 untagged_vid = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01005293 int err;
5294
Scott Feldman027e00d2015-06-01 11:39:05 -07005295 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5296 if (err)
5297 return err;
5298
Simon Hormandf6a2062015-05-21 12:40:17 +09005299 rocker_port_internal_vlan_id_put(rocker_port,
Scott Feldman6c707942014-11-28 14:34:28 +01005300 rocker_port->bridge_dev->ifindex);
Scott Feldman6c707942014-11-28 14:34:28 +01005301 rocker_port->internal_vlan_id =
5302 rocker_port_internal_vlan_id_get(rocker_port,
5303 rocker_port->dev->ifindex);
Scott Feldman027e00d2015-06-01 11:39:05 -07005304
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005305 switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
5306 false);
Scott Feldman027e00d2015-06-01 11:39:05 -07005307 rocker_port->bridge_dev = NULL;
5308
5309 err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
5310 untagged_vid, 0);
Scott Feldmane47172a2015-02-25 20:15:38 -08005311 if (err)
5312 return err;
5313
5314 if (rocker_port->dev->flags & IFF_UP)
Scott Feldman179f9a22015-06-12 21:35:46 -07005315 err = rocker_port_fwd_enable(rocker_port,
5316 SWITCHDEV_TRANS_NONE, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01005317
5318 return err;
5319}
5320
5322static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5323 struct net_device *master)
5324{
5325 int err;
5326
5327 rocker_port->bridge_dev = master;
5328
5329 err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
5330 if (err)
5331 return err;
5332 err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
5333
5334 return err;
5335}
5336
Jiri Pirko686ed302015-08-27 09:31:23 +02005337static int rocker_port_master_linked(struct rocker_port *rocker_port,
5338 struct net_device *master)
Scott Feldman6c707942014-11-28 14:34:28 +01005339{
Scott Feldman6c707942014-11-28 14:34:28 +01005340 int err = 0;
5341
Jiri Pirko686ed302015-08-27 09:31:23 +02005342 if (netif_is_bridge_master(master))
5343 err = rocker_port_bridge_join(rocker_port, master);
5344 else if (netif_is_ovs_master(master))
5345 err = rocker_port_ovs_changed(rocker_port, master);
5346 return err;
5347}
Scott Feldman6c707942014-11-28 14:34:28 +01005348
Jiri Pirko686ed302015-08-27 09:31:23 +02005349static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
5350{
5351 int err = 0;
5352
5353 if (rocker_port_is_bridged(rocker_port))
5354 err = rocker_port_bridge_leave(rocker_port);
5355 else if (rocker_port_is_ovsed(rocker_port))
5356 err = rocker_port_ovs_changed(rocker_port, NULL);
Scott Feldman6c707942014-11-28 14:34:28 +01005357 return err;
5358}
5359
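/* netdevice notifier: on NETDEV_CHANGEUPPER for a rocker port, reflect
 * enslavement to (or release from) a bridge or OVS master into the device
 * tables, e.g. when a port is added to a bridge with something like
 * "ip link set dev <port> master <bridge>".
 */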
5360static int rocker_netdevice_event(struct notifier_block *unused,
5361 unsigned long event, void *ptr)
5362{
Jiri Pirko686ed302015-08-27 09:31:23 +02005363 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5364 struct netdev_notifier_changeupper_info *info;
5365 struct rocker_port *rocker_port;
Scott Feldman6c707942014-11-28 14:34:28 +01005366 int err;
5367
Jiri Pirko686ed302015-08-27 09:31:23 +02005368 if (!rocker_port_dev_check(dev))
5369 return NOTIFY_DONE;
5370
Scott Feldman6c707942014-11-28 14:34:28 +01005371 switch (event) {
5372 case NETDEV_CHANGEUPPER:
Jiri Pirko686ed302015-08-27 09:31:23 +02005373 info = ptr;
5374 if (!info->master)
5375 goto out;
5376 rocker_port = netdev_priv(dev);
5377 if (info->linking) {
5378 err = rocker_port_master_linked(rocker_port,
5379 info->upper_dev);
5380 if (err)
5381 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5382 err);
5383 } else {
5384 err = rocker_port_master_unlinked(rocker_port);
5385 if (err)
5386 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
5387 err);
5388 }
Scott Feldman6c707942014-11-28 14:34:28 +01005389 break;
5390 }
Jiri Pirko686ed302015-08-27 09:31:23 +02005391out:
Scott Feldman6c707942014-11-28 14:34:28 +01005392 return NOTIFY_DONE;
5393}
5394
5395static struct notifier_block rocker_netdevice_nb __read_mostly = {
5396 .notifier_call = rocker_netdevice_event,
5397};
5398
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005399/************************************
5400 * Net event notifier event handler
5401 ************************************/
5402
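/* netevent notifier: ARP neighbour updates are pushed into the device's
 * IPv4 neighbour tables; entries that are no longer NUD_VALID are removed.
 */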
5403static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5404{
5405 struct rocker_port *rocker_port = netdev_priv(dev);
Scott Feldman02a9fbf2015-06-12 21:35:47 -07005406 int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5407 ROCKER_OP_FLAG_NOWAIT;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005408 __be32 ip_addr = *(__be32 *)n->primary_key;
5409
Scott Feldmanc4f20322015-05-10 09:47:50 -07005410 return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
5411 flags, ip_addr, n->ha);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005412}
5413
5414static int rocker_netevent_event(struct notifier_block *unused,
5415 unsigned long event, void *ptr)
5416{
5417 struct net_device *dev;
5418 struct neighbour *n = ptr;
5419 int err;
5420
5421 switch (event) {
5422 case NETEVENT_NEIGH_UPDATE:
5423 if (n->tbl != &arp_tbl)
5424 return NOTIFY_DONE;
5425 dev = n->dev;
5426 if (!rocker_port_dev_check(dev))
5427 return NOTIFY_DONE;
5428 err = rocker_neigh_update(dev, n);
5429 if (err)
5430 netdev_warn(dev,
5431 "failed to handle neigh update (err %d)\n",
5432 err);
5433 break;
5434 }
5435
5436 return NOTIFY_DONE;
5437}
5438
5439static struct notifier_block rocker_netevent_nb __read_mostly = {
5440 .notifier_call = rocker_netevent_event,
5441};
5442
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005443/***********************
5444 * Module init and exit
5445 ***********************/
5446
5447static int __init rocker_module_init(void)
5448{
Scott Feldman6c707942014-11-28 14:34:28 +01005449 int err;
5450
5451 register_netdevice_notifier(&rocker_netdevice_nb);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005452 register_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005453 err = pci_register_driver(&rocker_pci_driver);
5454 if (err)
5455 goto err_pci_register_driver;
5456 return 0;
5457
5458err_pci_register_driver:
Gilad Ben-Yossefa076e6b2015-06-23 10:52:10 +03005459 unregister_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005460 unregister_netdevice_notifier(&rocker_netdevice_nb);
5461 return err;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005462}
5463
5464static void __exit rocker_module_exit(void)
5465{
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005466 unregister_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005467 unregister_netdevice_notifier(&rocker_netdevice_nb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005468 pci_unregister_driver(&rocker_pci_driver);
5469}
5470
5471module_init(rocker_module_init);
5472module_exit(rocker_module_exit);
5473
5474MODULE_LICENSE("GPL v2");
5475MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5476MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5477MODULE_DESCRIPTION("Rocker switch device driver");
5478MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);