/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};

struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};

struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	struct rocker_fdb_tbl_key {
		u32 pport;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};

struct rocker_desc_info {
	char *data; /* mapped */
	size_t data_size;
	size_t tlv_size;
	struct rocker_desc *desc;
	dma_addr_t mapaddr;
};

struct rocker_dma_ring_info {
	size_t size;
	u32 head;
	u32 tail;
	struct rocker_desc *desc; /* mapped */
	dma_addr_t mapaddr;
	struct rocker_desc_info *desc_info;
	unsigned int type;
};

struct rocker;

enum {
	ROCKER_CTRL_LINK_LOCAL_MCAST,
	ROCKER_CTRL_LOCAL_ARP,
	ROCKER_CTRL_IPV4_MCAST,
	ROCKER_CTRL_IPV6_MCAST,
	ROCKER_CTRL_DFLT_BRIDGING,
	ROCKER_CTRL_DFLT_OVS,
	ROCKER_CTRL_MAX,
};

#define ROCKER_INTERNAL_VLAN_ID_BASE	0x0f00
#define ROCKER_N_INTERNAL_VLANS		255
#define ROCKER_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
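
/* With ROCKER_INTERNAL_VLAN_ID_BASE at 0x0f00 and 255 internal VLANs, the
 * internal IDs occupy 0x0f00..0x0ffe; rocker_vlan_id_is_internal() below
 * checks exactly this range (its end of 0xffe is 0x0ffe), stopping short
 * of the 802.1Q-reserved VID 0xfff.
 */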

struct rocker_port {
	struct net_device *dev;
	struct net_device *bridge_dev;
	struct rocker *rocker;
	unsigned int port_number;
	u32 pport;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	bool ctrls[ROCKER_CTRL_MAX];
	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
	struct napi_struct napi_tx;
	struct napi_struct napi_rx;
	struct rocker_dma_ring_info tx_ring;
	struct rocker_dma_ring_info rx_ring;
	struct list_head trans_mem;
};

struct rocker {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct msix_entry *msix_entries;
	unsigned int port_count;
	struct rocker_port **ports;
	struct {
		u64 id;
	} hw;
	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
	struct rocker_dma_ring_info cmd_ring;
	struct rocker_dma_ring_info event_ring;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  A higher
 * priority match takes precedence over a lower priority match.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}
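
/* A minimal usage sketch (not a caller from this file): untagged traffic
 * (VID 0) maps to the port's internal VLAN and asks the caller to pop the
 * tag, while any real VID is just converted to network byte order:
 *
 *	bool pop_vlan;
 *	__be16 vlan_id;
 *
 *	vlan_id = rocker_port_vid_to_vlan(rocker_port, 0, &pop_vlan);
 *	// vlan_id == rocker_port->internal_vlan_id, pop_vlan == true
 *	vlan_id = rocker_port_vid_to_vlan(rocker_port, 100, &pop_vlan);
 *	// vlan_id == htons(100), pop_vlan == false
 */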

static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool rocker_port_is_slave(const struct rocker_port *rocker_port,
				 const char *kind)
{
	return rocker_port->bridge_dev &&
	       !strcmp(rocker_port->bridge_dev->rtnl_link_ops->kind, kind);
}

static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
	return rocker_port_is_slave(rocker_port, "bridge");
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
	return rocker_port_is_slave(rocker_port, "openvswitch");
}

#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)

static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
				     enum switchdev_trans trans, int flags,
				     size_t size)
{
	struct list_head *elem = NULL;
	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a per-port list.  If in transaction
	 * commit phase, dequeue the memory from the per-port list
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	switch (trans) {
	case SWITCHDEV_TRANS_PREPARE:
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		list_add_tail(elem, &rocker_port->trans_mem);
		break;
	case SWITCHDEV_TRANS_COMMIT:
		BUG_ON(list_empty(&rocker_port->trans_mem));
		elem = rocker_port->trans_mem.next;
		list_del_init(elem);
		break;
	case SWITCHDEV_TRANS_NONE:
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (elem)
			INIT_LIST_HEAD(elem);
		break;
	default:
		break;
	}

	return elem ? elem + 1 : NULL;
}
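
/* A sketch of the intended pairing (hypothetical caller): switchdev runs
 * the same code path once with SWITCHDEV_TRANS_PREPARE and once with
 * SWITCHDEV_TRANS_COMMIT, so the commit-phase call returns the very
 * element the prepare-phase call allocated and queued:
 *
 *	entry = rocker_port_kzalloc(port, SWITCHDEV_TRANS_PREPARE, 0,
 *				    sizeof(*entry));	// kzalloc + enqueue
 *	...
 *	entry = rocker_port_kzalloc(port, SWITCHDEV_TRANS_COMMIT, 0,
 *				    sizeof(*entry));	// dequeue same memory
 *
 * This only works if prepare and commit make identical allocation
 * sequences, which is why the BUG_ON() above fires on an empty list.
 */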

static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
}

static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 size_t n, size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
}

static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
{
	struct list_head *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (trans == SWITCHDEV_TRANS_PREPARE)
		return;

	elem = (struct list_head *)mem - 1;
	BUG_ON(!list_empty(elem));
	kfree(elem);
}

struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
					      enum switchdev_trans trans,
					      int flags)
{
	struct rocker_wait *wait;

	wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
	if (!wait)
		return NULL;
	rocker_wait_init(wait);
	return wait;
}

static void rocker_wait_destroy(enum switchdev_trans trans,
				struct rocker_wait *wait)
{
	rocker_port_kfree(trans, wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	if (!wait->done)
		return false;
	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
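
/* For example, rocker_write32(rocker, TEST_REG, val) token-pastes the
 * register name and expands to
 *
 *	writel((val), (rocker)->hw_addr + (ROCKER_TEST_REG));
 *
 * so call sites name registers without their ROCKER_ prefix.
 */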

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}

static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
{
	const struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}

/******
 * TLV
 ******/

#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))

/*  <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 * |            Header           | Pad |           Payload           | Pad |
 * |     (struct rocker_tlv)     | ing |                             | ing |
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 *  <--------------------------- tlv->len -------------------------->
 */
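
/* Alignment example: with ROCKER_TLV_ALIGNTO of 8, a 5-byte payload gets
 * ROCKER_TLV_ALIGN(5) - 5 = 3 bytes of trailing padding; tlv->len records
 * ROCKER_TLV_HDRLEN + 5 (header plus unpadded payload), and
 * rocker_tlv_next() advances by ROCKER_TLV_ALIGN(tlv->len), which covers
 * the padding.
 */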

static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
					  int *remaining)
{
	int totlen = ROCKER_TLV_ALIGN(tlv->len);

	*remaining -= totlen;
	return (struct rocker_tlv *) ((char *) tlv + totlen);
}

static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
{
	return remaining >= (int) ROCKER_TLV_HDRLEN &&
	       tlv->len >= ROCKER_TLV_HDRLEN &&
	       tlv->len <= remaining;
}

#define rocker_tlv_for_each(pos, head, len, rem)	\
	for (pos = head, rem = len;			\
	     rocker_tlv_ok(pos, rem);			\
	     pos = rocker_tlv_next(pos, &(rem)))

#define rocker_tlv_for_each_nested(pos, tlv, rem)	\
	rocker_tlv_for_each(pos, rocker_tlv_data(tlv),	\
			    rocker_tlv_len(tlv), rem)

static int rocker_tlv_attr_size(int payload)
{
	return ROCKER_TLV_HDRLEN + payload;
}

static int rocker_tlv_total_size(int payload)
{
	return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}

static int rocker_tlv_padlen(int payload)
{
	return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}

static int rocker_tlv_type(const struct rocker_tlv *tlv)
{
	return tlv->type;
}

static void *rocker_tlv_data(const struct rocker_tlv *tlv)
{
	return (char *) tlv + ROCKER_TLV_HDRLEN;
}

static int rocker_tlv_len(const struct rocker_tlv *tlv)
{
	return tlv->len - ROCKER_TLV_HDRLEN;
}

static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
{
	return *(u8 *) rocker_tlv_data(tlv);
}

static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
{
	return *(u16 *) rocker_tlv_data(tlv);
}

static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
{
	return *(__be16 *) rocker_tlv_data(tlv);
}

static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
{
	return *(u32 *) rocker_tlv_data(tlv);
}

static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
{
	return *(u64 *) rocker_tlv_data(tlv);
}

static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
			     const char *buf, int buf_len)
{
	const struct rocker_tlv *tlv;
	const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
	int rem;

	memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));

	rocker_tlv_for_each(tlv, head, buf_len, rem) {
		u32 type = rocker_tlv_type(tlv);

		if (type > 0 && type <= maxtype)
			tb[type] = tlv;
	}
}

static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
				    const struct rocker_tlv *tlv)
{
	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
			 rocker_tlv_len(tlv));
}

static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
				  const struct rocker_desc_info *desc_info)
{
	rocker_tlv_parse(tb, maxtype, desc_info->data,
			 desc_info->desc->tlv_size);
}

static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
{
	return (struct rocker_tlv *) ((char *) desc_info->data +
					       desc_info->tlv_size);
}

static int rocker_tlv_put(struct rocker_desc_info *desc_info,
			  int attrtype, int attrlen, const void *data)
{
	int tail_room = desc_info->data_size - desc_info->tlv_size;
	int total_size = rocker_tlv_total_size(attrlen);
	struct rocker_tlv *tlv;

	if (unlikely(tail_room < total_size))
		return -EMSGSIZE;

	tlv = rocker_tlv_start(desc_info);
	desc_info->tlv_size += total_size;
	tlv->type = attrtype;
	tlv->len = rocker_tlv_attr_size(attrlen);
	memcpy(rocker_tlv_data(tlv), data, attrlen);
	memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
	return 0;
}

static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
			     int attrtype, u8 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
}

static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
			      int attrtype, u16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
}

static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
			       int attrtype, __be16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
}

static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
			      int attrtype, u32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
}

static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
			       int attrtype, __be32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
}

static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
			      int attrtype, u64 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
}

static struct rocker_tlv *
rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
{
	struct rocker_tlv *start = rocker_tlv_start(desc_info);

	if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
		return NULL;

	return start;
}

static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
				struct rocker_tlv *start)
{
	start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
}

static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
				   const struct rocker_tlv *start)
{
	desc_info->tlv_size = (const char *) start - desc_info->data;
}
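
/* A minimal building sketch (attribute names assumed from rocker.h): nests
 * are ordinary attributes whose length is patched once their children are
 * in place:
 *
 *	struct rocker_tlv *nest;
 *
 *	nest = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 *	if (!nest)
 *		return -EMSGSIZE;
 *	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
 *			       rocker_port->pport)) {
 *		rocker_tlv_nest_cancel(desc_info, nest);
 *		return -EMSGSIZE;
 *	}
 *	rocker_tlv_nest_end(desc_info, nest);
 *
 * On failure part-way through, rocker_tlv_nest_cancel() rewinds tlv_size
 * so a half-built nest never reaches the device.
 */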

/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}

static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *
rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
	static struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	static struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}
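
/* Ring bookkeeping, in brief: head and tail chase each other modulo size,
 * so head == tail means "empty" and one slot is always left unused;
 * rocker_desc_head_get() reports "full" when advancing head would land on
 * tail.  Completion hand-off rides on the GEN bit in comp_err: it is set
 * when a descriptor is done, rocker_desc_tail_get() consumes only
 * descriptors with GEN set, and rocker_desc_gen_clear() returns them.
 */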

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}

static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}
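
/* Note the asymmetry above: a ring the hardware consumes (event, rx) can
 * only be handed size - 1 descriptors, because advancing head onto tail
 * would look like an empty ring; the final descriptor is committed but not
 * passed to the hardware.
 */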

static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
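
/* Worked example: with the standard 1500-byte MTU this is
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes,
 * enough for a maximally-sized single-tagged frame.
 */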
1228
Simon Horman534ba6a2015-06-01 13:25:04 +09001229static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001230 struct rocker_desc_info *desc_info)
1231{
1232 struct net_device *dev = rocker_port->dev;
1233 struct sk_buff *skb;
1234 size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1235 int err;
1236
1237 /* Ensure that hw will see tlv_size zero in case of an error.
1238 * That tells hw to use another descriptor.
1239 */
1240 rocker_desc_cookie_ptr_set(desc_info, NULL);
1241 desc_info->tlv_size = 0;
1242
1243 skb = netdev_alloc_skb_ip_align(dev, buf_len);
1244 if (!skb)
1245 return -ENOMEM;
Simon Horman534ba6a2015-06-01 13:25:04 +09001246 err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001247 if (err) {
1248 dev_kfree_skb_any(skb);
1249 return err;
1250 }
1251 rocker_desc_cookie_ptr_set(desc_info, skb);
1252 return 0;
1253}
1254
Simon Hormane5054642015-05-25 14:28:36 +09001255static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
1256 const struct rocker_tlv **attrs)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001257{
1258 struct pci_dev *pdev = rocker->pdev;
1259 dma_addr_t dma_handle;
1260 size_t len;
1261
1262 if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1263 !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1264 return;
1265 dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1266 len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1267 pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1268}
1269
Simon Hormane5054642015-05-25 14:28:36 +09001270static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
1271 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001272{
Simon Hormane5054642015-05-25 14:28:36 +09001273 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001274 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1275
1276 if (!skb)
1277 return;
1278 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1279 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1280 dev_kfree_skb_any(skb);
1281}
1282
Simon Horman534ba6a2015-06-01 13:25:04 +09001283static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001284{
Simon Hormane5054642015-05-25 14:28:36 +09001285 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
Simon Horman534ba6a2015-06-01 13:25:04 +09001286 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001287 int i;
1288 int err;
1289
1290 for (i = 0; i < rx_ring->size; i++) {
Simon Horman534ba6a2015-06-01 13:25:04 +09001291 err = rocker_dma_rx_ring_skb_alloc(rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001292 &rx_ring->desc_info[i]);
1293 if (err)
1294 goto rollback;
1295 }
1296 return 0;
1297
1298rollback:
1299 for (i--; i >= 0; i--)
1300 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1301 return err;
1302}
1303
Simon Horman534ba6a2015-06-01 13:25:04 +09001304static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001305{
Simon Hormane5054642015-05-25 14:28:36 +09001306 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
Simon Horman534ba6a2015-06-01 13:25:04 +09001307 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001308 int i;
1309
1310 for (i = 0; i < rx_ring->size; i++)
1311 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1312}
1313
1314static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1315{
1316 struct rocker *rocker = rocker_port->rocker;
1317 int err;
1318
1319 err = rocker_dma_ring_create(rocker,
1320 ROCKER_DMA_TX(rocker_port->port_number),
1321 ROCKER_DMA_TX_DEFAULT_SIZE,
1322 &rocker_port->tx_ring);
1323 if (err) {
1324 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1325 return err;
1326 }
1327
1328 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1329 PCI_DMA_TODEVICE,
1330 ROCKER_DMA_TX_DESC_SIZE);
1331 if (err) {
1332 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1333 goto err_dma_tx_ring_bufs_alloc;
1334 }
1335
1336 err = rocker_dma_ring_create(rocker,
1337 ROCKER_DMA_RX(rocker_port->port_number),
1338 ROCKER_DMA_RX_DEFAULT_SIZE,
1339 &rocker_port->rx_ring);
1340 if (err) {
1341 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1342 goto err_dma_rx_ring_create;
1343 }
1344
1345 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1346 PCI_DMA_BIDIRECTIONAL,
1347 ROCKER_DMA_RX_DESC_SIZE);
1348 if (err) {
1349 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1350 goto err_dma_rx_ring_bufs_alloc;
1351 }
1352
Simon Horman534ba6a2015-06-01 13:25:04 +09001353 err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001354 if (err) {
1355 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1356 goto err_dma_rx_ring_skbs_alloc;
1357 }
1358 rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1359
1360 return 0;
1361
1362err_dma_rx_ring_skbs_alloc:
1363 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1364 PCI_DMA_BIDIRECTIONAL);
1365err_dma_rx_ring_bufs_alloc:
1366 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1367err_dma_rx_ring_create:
1368 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1369 PCI_DMA_TODEVICE);
1370err_dma_tx_ring_bufs_alloc:
1371 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1372 return err;
1373}
1374
1375static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1376{
1377 struct rocker *rocker = rocker_port->rocker;
1378
Simon Horman534ba6a2015-06-01 13:25:04 +09001379 rocker_dma_rx_ring_skbs_free(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001380 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1381 PCI_DMA_BIDIRECTIONAL);
1382 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1383 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1384 PCI_DMA_TODEVICE);
1385 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1386}
1387
Simon Hormane5054642015-05-25 14:28:36 +09001388static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1389 bool enable)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001390{
1391 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1392
1393 if (enable)
David S. Miller71a83a62015-03-03 21:16:48 -05001394 val |= 1ULL << rocker_port->pport;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001395 else
David S. Miller71a83a62015-03-03 21:16:48 -05001396 val &= ~(1ULL << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001397 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1398}
1399
1400/********************************
1401 * Interrupt handler and helpers
1402 ********************************/
1403
1404static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1405{
1406 struct rocker *rocker = dev_id;
Simon Hormane5054642015-05-25 14:28:36 +09001407 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001408 struct rocker_wait *wait;
1409 u32 credits = 0;
1410
1411 spin_lock(&rocker->cmd_ring_lock);
1412 while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1413 wait = rocker_desc_cookie_ptr_get(desc_info);
Scott Feldman179f9a22015-06-12 21:35:46 -07001414 if (wait->nowait) {
1415 rocker_desc_gen_clear(desc_info);
1416 rocker_wait_destroy(SWITCHDEV_TRANS_NONE, wait);
1417 } else {
1418 rocker_wait_wake_up(wait);
1419 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001420 credits++;
1421 }
1422 spin_unlock(&rocker->cmd_ring_lock);
1423 rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1424
1425 return IRQ_HANDLED;
1426}
1427
Simon Hormane5054642015-05-25 14:28:36 +09001428static void rocker_port_link_up(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001429{
1430 netif_carrier_on(rocker_port->dev);
1431 netdev_info(rocker_port->dev, "Link is up\n");
1432}
1433
Simon Hormane5054642015-05-25 14:28:36 +09001434static void rocker_port_link_down(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001435{
1436 netif_carrier_off(rocker_port->dev);
1437 netdev_info(rocker_port->dev, "Link is down\n");
1438}
1439
Simon Hormane5054642015-05-25 14:28:36 +09001440static int rocker_event_link_change(const struct rocker *rocker,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001441 const struct rocker_tlv *info)
1442{
Simon Hormane5054642015-05-25 14:28:36 +09001443 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001444 unsigned int port_number;
1445 bool link_up;
1446 struct rocker_port *rocker_port;
1447
1448 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001449 if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001450 !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1451 return -EIO;
1452 port_number =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001453 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001454 link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1455
1456 if (port_number >= rocker->port_count)
1457 return -EINVAL;
1458
1459 rocker_port = rocker->ports[port_number];
1460 if (netif_carrier_ok(rocker_port->dev) != link_up) {
1461 if (link_up)
1462 rocker_port_link_up(rocker_port);
1463 else
1464 rocker_port_link_down(rocker_port);
1465 }
1466
1467 return 0;
1468}
1469
Scott Feldman6c707942014-11-28 14:34:28 +01001470static int rocker_port_fdb(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001471 enum switchdev_trans trans,
Scott Feldman6c707942014-11-28 14:34:28 +01001472 const unsigned char *addr,
1473 __be16 vlan_id, int flags);
1474
Simon Hormane5054642015-05-25 14:28:36 +09001475static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
Scott Feldman6c707942014-11-28 14:34:28 +01001476 const struct rocker_tlv *info)
1477{
Simon Hormane5054642015-05-25 14:28:36 +09001478 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
Scott Feldman6c707942014-11-28 14:34:28 +01001479 unsigned int port_number;
1480 struct rocker_port *rocker_port;
Simon Hormane5054642015-05-25 14:28:36 +09001481 const unsigned char *addr;
Scott Feldman92014b92015-06-12 21:35:49 -07001482 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
Scott Feldman6c707942014-11-28 14:34:28 +01001483 __be16 vlan_id;
1484
1485 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001486 if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
Scott Feldman6c707942014-11-28 14:34:28 +01001487 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1488 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1489 return -EIO;
1490 port_number =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001491 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
Scott Feldman6c707942014-11-28 14:34:28 +01001492 addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
Jiri Pirko9b03c712014-12-03 14:14:53 +01001493 vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
Scott Feldman6c707942014-11-28 14:34:28 +01001494
1495 if (port_number >= rocker->port_count)
1496 return -EINVAL;
1497
1498 rocker_port = rocker->ports[port_number];
1499
1500 if (rocker_port->stp_state != BR_STATE_LEARNING &&
1501 rocker_port->stp_state != BR_STATE_FORWARDING)
1502 return 0;
1503
Scott Feldman92014b92015-06-12 21:35:49 -07001504 return rocker_port_fdb(rocker_port, SWITCHDEV_TRANS_NONE,
1505 addr, vlan_id, flags);
Scott Feldman6c707942014-11-28 14:34:28 +01001506}
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001507
Simon Hormane5054642015-05-25 14:28:36 +09001508static int rocker_event_process(const struct rocker *rocker,
1509 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001510{
Simon Hormane5054642015-05-25 14:28:36 +09001511 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1512 const struct rocker_tlv *info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001513 u16 type;
1514
1515 rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1516 if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1517 !attrs[ROCKER_TLV_EVENT_INFO])
1518 return -EIO;
1519
1520 type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1521 info = attrs[ROCKER_TLV_EVENT_INFO];
1522
1523 switch (type) {
1524 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1525 return rocker_event_link_change(rocker, info);
Scott Feldman6c707942014-11-28 14:34:28 +01001526 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1527 return rocker_event_mac_vlan_seen(rocker, info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001528 }
1529
1530 return -EOPNOTSUPP;
1531}
1532
1533static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1534{
1535 struct rocker *rocker = dev_id;
Simon Hormane5054642015-05-25 14:28:36 +09001536 const struct pci_dev *pdev = rocker->pdev;
1537 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001538 u32 credits = 0;
1539 int err;
1540
1541 while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1542 err = rocker_desc_err(desc_info);
1543 if (err) {
1544 dev_err(&pdev->dev, "event desc received with err %d\n",
1545 err);
1546 } else {
1547 err = rocker_event_process(rocker, desc_info);
1548 if (err)
1549 dev_err(&pdev->dev, "event processing failed with err %d\n",
1550 err);
1551 }
1552 rocker_desc_gen_clear(desc_info);
1553 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1554 credits++;
1555 }
1556 rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1557
1558 return IRQ_HANDLED;
1559}
1560
1561static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1562{
1563 struct rocker_port *rocker_port = dev_id;
1564
1565 napi_schedule(&rocker_port->napi_tx);
1566 return IRQ_HANDLED;
1567}
1568
1569static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1570{
1571 struct rocker_port *rocker_port = dev_id;
1572
1573 napi_schedule(&rocker_port->napi_rx);
1574 return IRQ_HANDLED;
1575}
1576
1577/********************
1578 * Command interface
1579 ********************/
1580
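/* Each command is described by a pair of callbacks: a prep callback
 * that fills the request TLVs into the descriptor, and an optional
 * proc callback that parses the TLVs of the reply.
 */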
Simon Horman534ba6a2015-06-01 13:25:04 +09001581typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001582 struct rocker_desc_info *desc_info,
1583 void *priv);
1584
Simon Horman534ba6a2015-06-01 13:25:04 +09001585typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001586 const struct rocker_desc_info *desc_info,
1587 void *priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001588
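/* Post a command descriptor to the cmd ring and, unless
 * ROCKER_OP_FLAG_NOWAIT is set, sleep up to HZ/10 for its
 * completion.  During a switchdev prepare transaction the descriptor
 * is built but never handed to the hardware.
 */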
Simon Horman534ba6a2015-06-01 13:25:04 +09001589static int rocker_cmd_exec(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07001590 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09001591 rocker_cmd_prep_cb_t prepare, void *prepare_priv,
1592 rocker_cmd_proc_cb_t process, void *process_priv)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001593{
Simon Horman534ba6a2015-06-01 13:25:04 +09001594 struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001595 struct rocker_desc_info *desc_info;
1596 struct rocker_wait *wait;
Scott Feldman179f9a22015-06-12 21:35:46 -07001597 bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
1598 unsigned long lock_flags;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001599 int err;
1600
Scott Feldman179f9a22015-06-12 21:35:46 -07001601 wait = rocker_wait_create(rocker_port, trans, flags);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001602 if (!wait)
1603 return -ENOMEM;
Scott Feldman179f9a22015-06-12 21:35:46 -07001604 wait->nowait = nowait;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001605
Scott Feldman179f9a22015-06-12 21:35:46 -07001606 spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
Scott Feldmanc4f20322015-05-10 09:47:50 -07001607
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001608 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1609 if (!desc_info) {
Scott Feldman179f9a22015-06-12 21:35:46 -07001610 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001611 err = -EAGAIN;
1612 goto out;
1613 }
Scott Feldmanc4f20322015-05-10 09:47:50 -07001614
Simon Horman534ba6a2015-06-01 13:25:04 +09001615 err = prepare(rocker_port, desc_info, prepare_priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001616 if (err) {
Scott Feldman179f9a22015-06-12 21:35:46 -07001617 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001618 goto out;
1619 }
Scott Feldmanc4f20322015-05-10 09:47:50 -07001620
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001621 rocker_desc_cookie_ptr_set(desc_info, wait);
Scott Feldmanc4f20322015-05-10 09:47:50 -07001622
1623 if (trans != SWITCHDEV_TRANS_PREPARE)
1624 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1625
Scott Feldman179f9a22015-06-12 21:35:46 -07001626 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1627
1628 if (nowait)
1629 return 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001630
Scott Feldmanc4f20322015-05-10 09:47:50 -07001631 if (trans != SWITCHDEV_TRANS_PREPARE)
1632 if (!rocker_wait_event_timeout(wait, HZ / 10))
1633 return -EIO;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001634
1635 err = rocker_desc_err(desc_info);
1636 if (err)
1637 return err;
1638
1639 if (process)
Simon Horman534ba6a2015-06-01 13:25:04 +09001640 err = process(rocker_port, desc_info, process_priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001641
1642 rocker_desc_gen_clear(desc_info);
1643out:
Simon Horman0985df72015-05-25 14:28:35 +09001644 rocker_wait_destroy(trans, wait);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001645 return err;
1646}
1647
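/* Request the settings of a single pport; paired with the
 * get-settings processors below.
 */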
1648static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001649rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001650 struct rocker_desc_info *desc_info,
1651 void *priv)
1652{
1653 struct rocker_tlv *cmd_info;
1654
1655 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1656 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1657 return -EMSGSIZE;
1658 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1659 if (!cmd_info)
1660 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001661 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1662 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001663 return -EMSGSIZE;
1664 rocker_tlv_nest_end(desc_info, cmd_info);
1665 return 0;
1666}
1667
1668static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001669rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001670 const struct rocker_desc_info *desc_info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001671 void *priv)
1672{
1673 struct ethtool_cmd *ecmd = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001674 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1675 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001676 u32 speed;
1677 u8 duplex;
1678 u8 autoneg;
1679
1680 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1681 if (!attrs[ROCKER_TLV_CMD_INFO])
1682 return -EIO;
1683
1684 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1685 attrs[ROCKER_TLV_CMD_INFO]);
1686 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1687 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1688 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1689 return -EIO;
1690
1691 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1692 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1693 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1694
1695 ecmd->transceiver = XCVR_INTERNAL;
1696 ecmd->supported = SUPPORTED_TP;
1697 ecmd->phy_address = 0xff;
1698 ecmd->port = PORT_TP;
1699 ethtool_cmd_speed_set(ecmd, speed);
1700 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1701 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1702
1703 return 0;
1704}
1705
1706static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001707rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001708 const struct rocker_desc_info *desc_info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001709 void *priv)
1710{
1711 unsigned char *macaddr = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001712 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1713 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1714 const struct rocker_tlv *attr;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001715
1716 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1717 if (!attrs[ROCKER_TLV_CMD_INFO])
1718 return -EIO;
1719
1720 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1721 attrs[ROCKER_TLV_CMD_INFO]);
1722 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1723 if (!attr)
1724 return -EIO;
1725
1726 if (rocker_tlv_len(attr) != ETH_ALEN)
1727 return -EINVAL;
1728
1729 ether_addr_copy(macaddr, rocker_tlv_data(attr));
1730 return 0;
1731}
1732
David Aherndb191702015-03-17 20:23:16 -06001733struct port_name {
1734 char *buf;
1735 size_t len;
1736};
1737
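/* Extract the port's physical name from the reply, copying only
 * alphanumeric characters into the caller's buffer and
 * NUL-terminating the result.
 */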
1738static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001739rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001740 const struct rocker_desc_info *desc_info,
David Aherndb191702015-03-17 20:23:16 -06001741 void *priv)
1742{
Simon Hormane5054642015-05-25 14:28:36 +09001743 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1744 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
David Aherndb191702015-03-17 20:23:16 -06001745 struct port_name *name = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001746 const struct rocker_tlv *attr;
David Aherndb191702015-03-17 20:23:16 -06001747 size_t i, j, len;
Simon Hormane5054642015-05-25 14:28:36 +09001748 const char *str;
David Aherndb191702015-03-17 20:23:16 -06001749
1750 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1751 if (!attrs[ROCKER_TLV_CMD_INFO])
1752 return -EIO;
1753
1754 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1755 attrs[ROCKER_TLV_CMD_INFO]);
1756 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1757 if (!attr)
1758 return -EIO;
1759
1760	len = min_t(size_t, rocker_tlv_len(attr), name->len - 1);	/* leave room for NUL */
1761 str = rocker_tlv_data(attr);
1762
1763 /* make sure name only contains alphanumeric characters */
1764 for (i = j = 0; i < len; ++i) {
1765 if (isalnum(str[i])) {
1766 name->buf[j] = str[i];
1767 j++;
1768 }
1769 }
1770
1771 if (j == 0)
1772 return -EIO;
1773
1774 name->buf[j] = '\0';
1775
1776 return 0;
1777}
1778
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001779static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001780rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001781 struct rocker_desc_info *desc_info,
1782 void *priv)
1783{
1784 struct ethtool_cmd *ecmd = priv;
1785 struct rocker_tlv *cmd_info;
1786
1787 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1788 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1789 return -EMSGSIZE;
1790 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1791 if (!cmd_info)
1792 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001793 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1794 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001795 return -EMSGSIZE;
1796 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1797 ethtool_cmd_speed(ecmd)))
1798 return -EMSGSIZE;
1799 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1800 ecmd->duplex))
1801 return -EMSGSIZE;
1802 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1803 ecmd->autoneg))
1804 return -EMSGSIZE;
1805 rocker_tlv_nest_end(desc_info, cmd_info);
1806 return 0;
1807}
1808
1809static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001810rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001811 struct rocker_desc_info *desc_info,
1812 void *priv)
1813{
Simon Hormane5054642015-05-25 14:28:36 +09001814 const unsigned char *macaddr = priv;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001815 struct rocker_tlv *cmd_info;
1816
1817 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1818 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1819 return -EMSGSIZE;
1820 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1821 if (!cmd_info)
1822 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001823 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1824 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001825 return -EMSGSIZE;
1826 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1827 ETH_ALEN, macaddr))
1828 return -EMSGSIZE;
1829 rocker_tlv_nest_end(desc_info, cmd_info);
1830 return 0;
1831}
1832
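/* Build a SET_PORT_SETTINGS request that carries only the new MTU
 * for this pport.
 */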
Scott Feldman5111f802014-11-28 14:34:30 +01001833static int
Scott Feldman77a58c72015-07-08 16:06:47 -07001834rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1835 struct rocker_desc_info *desc_info,
1836 void *priv)
1837{
1838 int mtu = *(int *)priv;
1839 struct rocker_tlv *cmd_info;
1840
1841 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1842 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1843 return -EMSGSIZE;
1844 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1845 if (!cmd_info)
1846 return -EMSGSIZE;
1847 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1848 rocker_port->pport))
1849 return -EMSGSIZE;
1850 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1851 mtu))
1852 return -EMSGSIZE;
1853 rocker_tlv_nest_end(desc_info, cmd_info);
1854 return 0;
1855}
1856
1857static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001858rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
Scott Feldman5111f802014-11-28 14:34:30 +01001859 struct rocker_desc_info *desc_info,
1860 void *priv)
1861{
1862 struct rocker_tlv *cmd_info;
1863
1864 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1865 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1866 return -EMSGSIZE;
1867 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1868 if (!cmd_info)
1869 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001870 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1871 rocker_port->pport))
Scott Feldman5111f802014-11-28 14:34:30 +01001872 return -EMSGSIZE;
1873 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1874 !!(rocker_port->brport_flags & BR_LEARNING)))
1875 return -EMSGSIZE;
1876 rocker_tlv_nest_end(desc_info, cmd_info);
1877 return 0;
1878}
1879
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001880static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1881 struct ethtool_cmd *ecmd)
1882{
Scott Feldman179f9a22015-06-12 21:35:46 -07001883 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001884 rocker_cmd_get_port_settings_prep, NULL,
1885 rocker_cmd_get_port_settings_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001886 ecmd);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001887}
1888
1889static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1890 unsigned char *macaddr)
1891{
Scott Feldman179f9a22015-06-12 21:35:46 -07001892 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001893 rocker_cmd_get_port_settings_prep, NULL,
1894 rocker_cmd_get_port_settings_macaddr_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001895 macaddr);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001896}
1897
1898static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1899 struct ethtool_cmd *ecmd)
1900{
Scott Feldman179f9a22015-06-12 21:35:46 -07001901 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001902 rocker_cmd_set_port_settings_ethtool_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001903 ecmd, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001904}
1905
1906static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1907 unsigned char *macaddr)
1908{
Scott Feldman179f9a22015-06-12 21:35:46 -07001909 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001910 rocker_cmd_set_port_settings_macaddr_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001911 macaddr, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001912}
1913
Scott Feldman77a58c72015-07-08 16:06:47 -07001914static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1915 int mtu)
1916{
1917 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
1918 rocker_cmd_set_port_settings_mtu_prep,
1919 &mtu, NULL, NULL);
1920}
1921
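/* Sync the BR_LEARNING bridge port flag to the device so hardware
 * source-MAC learning matches the software bridge state.
 */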
Scott Feldmanc4f20322015-05-10 09:47:50 -07001922static int rocker_port_set_learning(struct rocker_port *rocker_port,
1923 enum switchdev_trans trans)
Scott Feldman5111f802014-11-28 14:34:30 +01001924{
Scott Feldman179f9a22015-06-12 21:35:46 -07001925 return rocker_cmd_exec(rocker_port, trans, 0,
Scott Feldman5111f802014-11-28 14:34:30 +01001926 rocker_cmd_set_port_learning_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001927 NULL, NULL, NULL);
Scott Feldman5111f802014-11-28 14:34:30 +01001928}
1929
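/* The helpers below emit the per-table key TLVs for a flow entry;
 * each returns -EMSGSIZE when the descriptor runs out of space.
 */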
Simon Hormane5054642015-05-25 14:28:36 +09001930static int
1931rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1932 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001933{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001934 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1935 entry->key.ig_port.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001936 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001937 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1938 entry->key.ig_port.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001939 return -EMSGSIZE;
1940 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1941 entry->key.ig_port.goto_tbl))
1942 return -EMSGSIZE;
1943
1944 return 0;
1945}
1946
Simon Hormane5054642015-05-25 14:28:36 +09001947static int
1948rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1949 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001950{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001951 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1952 entry->key.vlan.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001953 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001954 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1955 entry->key.vlan.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001956 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001957 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1958 entry->key.vlan.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001959 return -EMSGSIZE;
1960 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1961 entry->key.vlan.goto_tbl))
1962 return -EMSGSIZE;
1963 if (entry->key.vlan.untagged &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01001964 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1965 entry->key.vlan.new_vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001966 return -EMSGSIZE;
1967
1968 return 0;
1969}
1970
Simon Hormane5054642015-05-25 14:28:36 +09001971static int
1972rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1973 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001974{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001975 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1976 entry->key.term_mac.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001977 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001978 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1979 entry->key.term_mac.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001980 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001981 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1982 entry->key.term_mac.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001983 return -EMSGSIZE;
1984 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1985 ETH_ALEN, entry->key.term_mac.eth_dst))
1986 return -EMSGSIZE;
1987 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1988 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1989 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001990 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1991 entry->key.term_mac.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001992 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001993 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1994 entry->key.term_mac.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001995 return -EMSGSIZE;
1996 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1997 entry->key.term_mac.goto_tbl))
1998 return -EMSGSIZE;
1999 if (entry->key.term_mac.copy_to_cpu &&
2000 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2001 entry->key.term_mac.copy_to_cpu))
2002 return -EMSGSIZE;
2003
2004 return 0;
2005}
2006
2007static int
2008rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002009 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002010{
Jiri Pirko9b03c712014-12-03 14:14:53 +01002011 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2012 entry->key.ucast_routing.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002013 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002014 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2015 entry->key.ucast_routing.dst4))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002016 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002017 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2018 entry->key.ucast_routing.dst4_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002019 return -EMSGSIZE;
2020 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2021 entry->key.ucast_routing.goto_tbl))
2022 return -EMSGSIZE;
2023 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2024 entry->key.ucast_routing.group_id))
2025 return -EMSGSIZE;
2026
2027 return 0;
2028}
2029
Simon Hormane5054642015-05-25 14:28:36 +09002030static int
2031rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2032 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002033{
2034 if (entry->key.bridge.has_eth_dst &&
2035 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2036 ETH_ALEN, entry->key.bridge.eth_dst))
2037 return -EMSGSIZE;
2038 if (entry->key.bridge.has_eth_dst_mask &&
2039 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2040 ETH_ALEN, entry->key.bridge.eth_dst_mask))
2041 return -EMSGSIZE;
2042 if (entry->key.bridge.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002043 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2044 entry->key.bridge.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002045 return -EMSGSIZE;
2046 if (entry->key.bridge.tunnel_id &&
2047 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2048 entry->key.bridge.tunnel_id))
2049 return -EMSGSIZE;
2050 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2051 entry->key.bridge.goto_tbl))
2052 return -EMSGSIZE;
2053 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2054 entry->key.bridge.group_id))
2055 return -EMSGSIZE;
2056 if (entry->key.bridge.copy_to_cpu &&
2057 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2058 entry->key.bridge.copy_to_cpu))
2059 return -EMSGSIZE;
2060
2061 return 0;
2062}
2063
Simon Hormane5054642015-05-25 14:28:36 +09002064static int
2065rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2066 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002067{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002068 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2069 entry->key.acl.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002070 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002071 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2072 entry->key.acl.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002073 return -EMSGSIZE;
2074 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2075 ETH_ALEN, entry->key.acl.eth_src))
2076 return -EMSGSIZE;
2077 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2078 ETH_ALEN, entry->key.acl.eth_src_mask))
2079 return -EMSGSIZE;
2080 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2081 ETH_ALEN, entry->key.acl.eth_dst))
2082 return -EMSGSIZE;
2083 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2084 ETH_ALEN, entry->key.acl.eth_dst_mask))
2085 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002086 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2087 entry->key.acl.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002088 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002089 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2090 entry->key.acl.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002091 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002092 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2093 entry->key.acl.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002094 return -EMSGSIZE;
2095
2096 switch (ntohs(entry->key.acl.eth_type)) {
2097 case ETH_P_IP:
2098 case ETH_P_IPV6:
2099 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2100 entry->key.acl.ip_proto))
2101 return -EMSGSIZE;
2102 if (rocker_tlv_put_u8(desc_info,
2103 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2104 entry->key.acl.ip_proto_mask))
2105 return -EMSGSIZE;
2106 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2107 entry->key.acl.ip_tos & 0x3f))
2108 return -EMSGSIZE;
2109 if (rocker_tlv_put_u8(desc_info,
2110 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2111 entry->key.acl.ip_tos_mask & 0x3f))
2112 return -EMSGSIZE;
2113 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2114 (entry->key.acl.ip_tos & 0xc0) >> 6))
2115 return -EMSGSIZE;
2116 if (rocker_tlv_put_u8(desc_info,
2117 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2118 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
2119 return -EMSGSIZE;
2120 break;
2121 }
2122
2123 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2124 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2125 entry->key.acl.group_id))
2126 return -EMSGSIZE;
2127
2128 return 0;
2129}
2130
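/* Emit the TLVs common to every flow add/mod (table id, priority,
 * hardtime, cookie), then the key TLVs for the target table.
 */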
Simon Horman534ba6a2015-06-01 13:25:04 +09002131static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002132 struct rocker_desc_info *desc_info,
2133 void *priv)
2134{
Simon Hormane5054642015-05-25 14:28:36 +09002135 const struct rocker_flow_tbl_entry *entry = priv;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002136 struct rocker_tlv *cmd_info;
2137 int err = 0;
2138
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002139 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002140 return -EMSGSIZE;
2141 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2142 if (!cmd_info)
2143 return -EMSGSIZE;
2144 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2145 entry->key.tbl_id))
2146 return -EMSGSIZE;
2147 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2148 entry->key.priority))
2149 return -EMSGSIZE;
2150 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2151 return -EMSGSIZE;
2152 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2153 entry->cookie))
2154 return -EMSGSIZE;
2155
2156 switch (entry->key.tbl_id) {
2157 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2158 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2159 break;
2160 case ROCKER_OF_DPA_TABLE_ID_VLAN:
2161 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2162 break;
2163 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2164 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2165 break;
2166 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2167 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2168 break;
2169 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2170 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2171 break;
2172 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2173 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2174 break;
2175 default:
2176 err = -ENOTSUPP;
2177 break;
2178 }
2179
2180 if (err)
2181 return err;
2182
2183 rocker_tlv_nest_end(desc_info, cmd_info);
2184
2185 return 0;
2186}
2187
Simon Horman534ba6a2015-06-01 13:25:04 +09002188static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002189 struct rocker_desc_info *desc_info,
2190 void *priv)
2191{
2192 const struct rocker_flow_tbl_entry *entry = priv;
2193 struct rocker_tlv *cmd_info;
2194
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002195 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002196 return -EMSGSIZE;
2197 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2198 if (!cmd_info)
2199 return -EMSGSIZE;
2200 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2201 entry->cookie))
2202 return -EMSGSIZE;
2203 rocker_tlv_nest_end(desc_info, cmd_info);
2204
2205 return 0;
2206}
2207
2208static int
2209rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2210 struct rocker_group_tbl_entry *entry)
2211{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002212 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002213 ROCKER_GROUP_PORT_GET(entry->group_id)))
2214 return -EMSGSIZE;
2215 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2216 entry->l2_interface.pop_vlan))
2217 return -EMSGSIZE;
2218
2219 return 0;
2220}
2221
2222static int
2223rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002224 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002225{
2226 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2227 entry->l2_rewrite.group_id))
2228 return -EMSGSIZE;
2229 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2230 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2231 ETH_ALEN, entry->l2_rewrite.eth_src))
2232 return -EMSGSIZE;
2233 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2234 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2235 ETH_ALEN, entry->l2_rewrite.eth_dst))
2236 return -EMSGSIZE;
2237 if (entry->l2_rewrite.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002238 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2239 entry->l2_rewrite.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002240 return -EMSGSIZE;
2241
2242 return 0;
2243}
2244
2245static int
2246rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002247 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002248{
2249 int i;
2250 struct rocker_tlv *group_ids;
2251
2252 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2253 entry->group_count))
2254 return -EMSGSIZE;
2255
2256 group_ids = rocker_tlv_nest_start(desc_info,
2257 ROCKER_TLV_OF_DPA_GROUP_IDS);
2258 if (!group_ids)
2259 return -EMSGSIZE;
2260
2261 for (i = 0; i < entry->group_count; i++)
2262 /* Note TLV array is 1-based */
2263 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2264 return -EMSGSIZE;
2265
2266 rocker_tlv_nest_end(desc_info, group_ids);
2267
2268 return 0;
2269}
2270
2271static int
2272rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002273 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002274{
2275 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2276 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2277 ETH_ALEN, entry->l3_unicast.eth_src))
2278 return -EMSGSIZE;
2279 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2280 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2281 ETH_ALEN, entry->l3_unicast.eth_dst))
2282 return -EMSGSIZE;
2283 if (entry->l3_unicast.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002284 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2285 entry->l3_unicast.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002286 return -EMSGSIZE;
2287 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2288 entry->l3_unicast.ttl_check))
2289 return -EMSGSIZE;
2290 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2291 entry->l3_unicast.group_id))
2292 return -EMSGSIZE;
2293
2294 return 0;
2295}
2296
Simon Horman534ba6a2015-06-01 13:25:04 +09002297static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002298 struct rocker_desc_info *desc_info,
2299 void *priv)
2300{
2301 struct rocker_group_tbl_entry *entry = priv;
2302 struct rocker_tlv *cmd_info;
2303 int err = 0;
2304
2305 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2306 return -EMSGSIZE;
2307 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2308 if (!cmd_info)
2309 return -EMSGSIZE;
2310
2311 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2312 entry->group_id))
2313 return -EMSGSIZE;
2314
2315 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2316 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2317 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2318 break;
2319 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2320 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2321 break;
2322 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2323 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2324 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2325 break;
2326 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2327 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2328 break;
2329 default:
2330 err = -ENOTSUPP;
2331 break;
2332 }
2333
2334 if (err)
2335 return err;
2336
2337 rocker_tlv_nest_end(desc_info, cmd_info);
2338
2339 return 0;
2340}
2341
Simon Horman534ba6a2015-06-01 13:25:04 +09002342static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002343 struct rocker_desc_info *desc_info,
2344 void *priv)
2345{
2346 const struct rocker_group_tbl_entry *entry = priv;
2347 struct rocker_tlv *cmd_info;
2348
2349 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2350 return -EMSGSIZE;
2351 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2352 if (!cmd_info)
2353 return -EMSGSIZE;
2354 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2355 entry->group_id))
2356 return -EMSGSIZE;
2357 rocker_tlv_nest_end(desc_info, cmd_info);
2358
2359 return 0;
2360}
2361
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002362/***************************************************
2363 * Flow, group, FDB, internal VLAN and neigh tables
2364 ***************************************************/
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002365
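/* The flow, group, FDB, internal VLAN and neigh tables are hash
 * tables, each protected by its own spinlock.
 */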
2366static int rocker_init_tbls(struct rocker *rocker)
2367{
2368 hash_init(rocker->flow_tbl);
2369 spin_lock_init(&rocker->flow_tbl_lock);
2370
2371 hash_init(rocker->group_tbl);
2372 spin_lock_init(&rocker->group_tbl_lock);
2373
2374 hash_init(rocker->fdb_tbl);
2375 spin_lock_init(&rocker->fdb_tbl_lock);
2376
2377 hash_init(rocker->internal_vlan_tbl);
2378 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2379
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002380 hash_init(rocker->neigh_tbl);
2381 spin_lock_init(&rocker->neigh_tbl_lock);
2382
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002383 return 0;
2384}
2385
2386static void rocker_free_tbls(struct rocker *rocker)
2387{
2388 unsigned long flags;
2389 struct rocker_flow_tbl_entry *flow_entry;
2390 struct rocker_group_tbl_entry *group_entry;
2391 struct rocker_fdb_tbl_entry *fdb_entry;
2392 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002393 struct rocker_neigh_tbl_entry *neigh_entry;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002394 struct hlist_node *tmp;
2395 int bkt;
2396
2397 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2398 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2399 hash_del(&flow_entry->entry);
2400 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2401
2402 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2403 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2404 hash_del(&group_entry->entry);
2405 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2406
2407 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2408 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2409 hash_del(&fdb_entry->entry);
2410 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2411
2412 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2413 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2414 tmp, internal_vlan_entry, entry)
2415 hash_del(&internal_vlan_entry->entry);
2416 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002417
2418 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2419 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2420 hash_del(&neigh_entry->entry);
2421 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002422}
2423
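/* Find a flow entry by key CRC bucket plus full key compare.
 * Callers hold flow_tbl_lock.
 */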
2424static struct rocker_flow_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002425rocker_flow_tbl_find(const struct rocker *rocker,
2426 const struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002427{
2428 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002429 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002430
2431 hash_for_each_possible(rocker->flow_tbl, found,
2432 entry, match->key_crc32) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002433 if (memcmp(&found->key, &match->key, key_len) == 0)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002434 return found;
2435 }
2436
2437 return NULL;
2438}
2439
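/* Add or update a flow entry.  An existing match keeps its cookie
 * and is reissued as FLOW_MOD; a new entry takes the next cookie and
 * is issued as FLOW_ADD.  Entries are not hashed while a switchdev
 * prepare transaction is in flight.
 */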
2440static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07002441 enum switchdev_trans trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002442 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002443{
2444 struct rocker *rocker = rocker_port->rocker;
2445 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002446 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002447 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002448
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002449 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002450
Scott Feldman179f9a22015-06-12 21:35:46 -07002451 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002452
2453 found = rocker_flow_tbl_find(rocker, match);
2454
2455 if (found) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002456 match->cookie = found->cookie;
Scott Feldmanc4f20322015-05-10 09:47:50 -07002457 if (trans != SWITCHDEV_TRANS_PREPARE)
2458 hash_del(&found->entry);
Simon Horman0985df72015-05-25 14:28:35 +09002459 rocker_port_kfree(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002460 found = match;
2461 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002462 } else {
2463 found = match;
2464 found->cookie = rocker->flow_tbl_next_cookie++;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002465 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002466 }
2467
Scott Feldmanc4f20322015-05-10 09:47:50 -07002468 if (trans != SWITCHDEV_TRANS_PREPARE)
2469 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002470
Scott Feldman179f9a22015-06-12 21:35:46 -07002471 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002472
Scott Feldman179f9a22015-06-12 21:35:46 -07002473 return rocker_cmd_exec(rocker_port, trans, flags,
2474 rocker_cmd_flow_tbl_add, found, NULL, NULL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002475}
2476
2477static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07002478 enum switchdev_trans trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002479 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002480{
2481 struct rocker *rocker = rocker_port->rocker;
2482 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002483 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002484 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002485 int err = 0;
2486
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002487 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002488
Scott Feldman179f9a22015-06-12 21:35:46 -07002489 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002490
2491 found = rocker_flow_tbl_find(rocker, match);
2492
2493 if (found) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07002494 if (trans != SWITCHDEV_TRANS_PREPARE)
2495 hash_del(&found->entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002496 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002497 }
2498
Scott Feldman179f9a22015-06-12 21:35:46 -07002499 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002500
Simon Horman0985df72015-05-25 14:28:35 +09002501 rocker_port_kfree(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002502
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002503 if (found) {
Scott Feldman179f9a22015-06-12 21:35:46 -07002504 err = rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002505 rocker_cmd_flow_tbl_del,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002506 found, NULL, NULL);
Simon Horman0985df72015-05-25 14:28:35 +09002507 rocker_port_kfree(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002508 }
2509
2510 return err;
2511}
2512
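/* Dispatch a flow table operation: ROCKER_OP_FLAG_REMOVE selects
 * delete, otherwise add/update.
 */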
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002513static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002514 enum switchdev_trans trans, int flags,
2515 struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002516{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002517 if (flags & ROCKER_OP_FLAG_REMOVE)
Scott Feldman179f9a22015-06-12 21:35:46 -07002518 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002519 else
Scott Feldman179f9a22015-06-12 21:35:46 -07002520 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002521}
2522
2523static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002524 enum switchdev_trans trans, int flags,
2525 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002526 enum rocker_of_dpa_table_id goto_tbl)
2527{
2528 struct rocker_flow_tbl_entry *entry;
2529
Scott Feldman179f9a22015-06-12 21:35:46 -07002530 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002531 if (!entry)
2532 return -ENOMEM;
2533
2534 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2535 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002536 entry->key.ig_port.in_pport = in_pport;
2537 entry->key.ig_port.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002538 entry->key.ig_port.goto_tbl = goto_tbl;
2539
Scott Feldmanc4f20322015-05-10 09:47:50 -07002540 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002541}
2542
2543static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002544 enum switchdev_trans trans, int flags,
2545 u32 in_pport, __be16 vlan_id,
2546 __be16 vlan_id_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002547 enum rocker_of_dpa_table_id goto_tbl,
2548 bool untagged, __be16 new_vlan_id)
2549{
2550 struct rocker_flow_tbl_entry *entry;
2551
Scott Feldman179f9a22015-06-12 21:35:46 -07002552 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002553 if (!entry)
2554 return -ENOMEM;
2555
2556 entry->key.priority = ROCKER_PRIORITY_VLAN;
2557 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002558 entry->key.vlan.in_pport = in_pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002559 entry->key.vlan.vlan_id = vlan_id;
2560 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2561 entry->key.vlan.goto_tbl = goto_tbl;
2562
2563 entry->key.vlan.untagged = untagged;
2564 entry->key.vlan.new_vlan_id = new_vlan_id;
2565
Scott Feldmanc4f20322015-05-10 09:47:50 -07002566 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002567}
2568
2569static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002570 enum switchdev_trans trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002571 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002572 __be16 eth_type, const u8 *eth_dst,
2573 const u8 *eth_dst_mask, __be16 vlan_id,
2574 __be16 vlan_id_mask, bool copy_to_cpu,
2575 int flags)
2576{
2577 struct rocker_flow_tbl_entry *entry;
2578
Scott Feldman179f9a22015-06-12 21:35:46 -07002579 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002580 if (!entry)
2581 return -ENOMEM;
2582
2583 if (is_multicast_ether_addr(eth_dst)) {
2584 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2585 entry->key.term_mac.goto_tbl =
2586 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2587 } else {
2588 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2589 entry->key.term_mac.goto_tbl =
2590 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2591 }
2592
2593 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002594 entry->key.term_mac.in_pport = in_pport;
2595 entry->key.term_mac.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002596 entry->key.term_mac.eth_type = eth_type;
2597 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2598 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2599 entry->key.term_mac.vlan_id = vlan_id;
2600 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2601 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2602
Scott Feldmanc4f20322015-05-10 09:47:50 -07002603 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002604}
2605
2606static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002607 enum switchdev_trans trans, int flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002608 const u8 *eth_dst, const u8 *eth_dst_mask,
2609 __be16 vlan_id, u32 tunnel_id,
2610 enum rocker_of_dpa_table_id goto_tbl,
2611 u32 group_id, bool copy_to_cpu)
2612{
2613 struct rocker_flow_tbl_entry *entry;
2614 u32 priority;
2615 bool vlan_bridging = !!vlan_id;
2616	bool dflt = !eth_dst || eth_dst_mask;
2617 bool wild = false;
2618
Scott Feldman179f9a22015-06-12 21:35:46 -07002619 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002620 if (!entry)
2621 return -ENOMEM;
2622
2623 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2624
2625 if (eth_dst) {
2626 entry->key.bridge.has_eth_dst = 1;
2627 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2628 }
2629 if (eth_dst_mask) {
2630 entry->key.bridge.has_eth_dst_mask = 1;
2631 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
Simon Horman629161f2015-04-30 15:21:29 +09002632 if (!ether_addr_equal(eth_dst_mask, ff_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002633 wild = true;
2634 }
2635
2636 priority = ROCKER_PRIORITY_UNKNOWN;
Thomas Graf51ace882014-11-28 14:34:32 +01002637 if (vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002638 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002639 else if (vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002640 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002641 else if (vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002642 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
Thomas Graf51ace882014-11-28 14:34:32 +01002643 else if (!vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002644 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002645 else if (!vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002646 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002647 else if (!vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002648 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2649
2650 entry->key.priority = priority;
2651 entry->key.bridge.vlan_id = vlan_id;
2652 entry->key.bridge.tunnel_id = tunnel_id;
2653 entry->key.bridge.goto_tbl = goto_tbl;
2654 entry->key.bridge.group_id = group_id;
2655 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2656
Scott Feldmanc4f20322015-05-10 09:47:50 -07002657 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002658}
2659
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002660static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002661 enum switchdev_trans trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002662 __be16 eth_type, __be32 dst,
2663 __be32 dst_mask, u32 priority,
2664 enum rocker_of_dpa_table_id goto_tbl,
2665 u32 group_id, int flags)
2666{
2667 struct rocker_flow_tbl_entry *entry;
2668
Scott Feldman179f9a22015-06-12 21:35:46 -07002669 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002670 if (!entry)
2671 return -ENOMEM;
2672
2673 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2674 entry->key.priority = priority;
2675 entry->key.ucast_routing.eth_type = eth_type;
2676 entry->key.ucast_routing.dst4 = dst;
2677 entry->key.ucast_routing.dst4_mask = dst_mask;
2678 entry->key.ucast_routing.goto_tbl = goto_tbl;
2679 entry->key.ucast_routing.group_id = group_id;
2680 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2681 ucast_routing.group_id);
2682
Scott Feldmanc4f20322015-05-10 09:47:50 -07002683 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002684}
2685
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002686static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002687 enum switchdev_trans trans, int flags,
2688 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002689 const u8 *eth_src, const u8 *eth_src_mask,
2690 const u8 *eth_dst, const u8 *eth_dst_mask,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002691 __be16 eth_type, __be16 vlan_id,
2692 __be16 vlan_id_mask, u8 ip_proto,
2693 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002694 u32 group_id)
2695{
2696 u32 priority;
2697 struct rocker_flow_tbl_entry *entry;
2698
Scott Feldman179f9a22015-06-12 21:35:46 -07002699 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002700 if (!entry)
2701 return -ENOMEM;
2702
2703 priority = ROCKER_PRIORITY_ACL_NORMAL;
2704 if (eth_dst && eth_dst_mask) {
Simon Horman629161f2015-04-30 15:21:29 +09002705 if (ether_addr_equal(eth_dst_mask, mcast_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002706 priority = ROCKER_PRIORITY_ACL_DFLT;
2707 else if (is_link_local_ether_addr(eth_dst))
2708 priority = ROCKER_PRIORITY_ACL_CTRL;
2709 }
2710
2711 entry->key.priority = priority;
2712 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002713 entry->key.acl.in_pport = in_pport;
2714 entry->key.acl.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002715
2716 if (eth_src)
2717 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2718 if (eth_src_mask)
2719 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2720 if (eth_dst)
2721 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2722 if (eth_dst_mask)
2723 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2724
2725 entry->key.acl.eth_type = eth_type;
2726 entry->key.acl.vlan_id = vlan_id;
2727 entry->key.acl.vlan_id_mask = vlan_id_mask;
2728 entry->key.acl.ip_proto = ip_proto;
2729 entry->key.acl.ip_proto_mask = ip_proto_mask;
2730 entry->key.acl.ip_tos = ip_tos;
2731 entry->key.acl.ip_tos_mask = ip_tos_mask;
2732 entry->key.acl.group_id = group_id;
2733
Scott Feldmanc4f20322015-05-10 09:47:50 -07002734 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002735}
2736
2737static struct rocker_group_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002738rocker_group_tbl_find(const struct rocker *rocker,
2739 const struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002740{
2741 struct rocker_group_tbl_entry *found;
2742
2743 hash_for_each_possible(rocker->group_tbl, found,
2744 entry, match->group_id) {
2745 if (found->group_id == match->group_id)
2746 return found;
2747 }
2748
2749 return NULL;
2750}
2751
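/* L2 flood and multicast group entries own a separately allocated
 * group_ids array, freed together with the entry itself.
 */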
Simon Horman0985df72015-05-25 14:28:35 +09002752static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002753 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002754{
2755 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2756 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2757 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
Simon Horman0985df72015-05-25 14:28:35 +09002758 rocker_port_kfree(trans, entry->group_ids);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002759 break;
2760 default:
2761 break;
2762 }
Simon Horman0985df72015-05-25 14:28:35 +09002763 rocker_port_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002764}
2765
2766static int rocker_group_tbl_add(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07002767 enum switchdev_trans trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002768 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002769{
2770 struct rocker *rocker = rocker_port->rocker;
2771 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002772 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002773
Scott Feldman179f9a22015-06-12 21:35:46 -07002774 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002775
2776 found = rocker_group_tbl_find(rocker, match);
2777
2778 if (found) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07002779 if (trans != SWITCHDEV_TRANS_PREPARE)
2780 hash_del(&found->entry);
Simon Horman0985df72015-05-25 14:28:35 +09002781 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002782 found = match;
2783 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2784 } else {
2785 found = match;
2786 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2787 }
2788
Scott Feldmanc4f20322015-05-10 09:47:50 -07002789 if (trans != SWITCHDEV_TRANS_PREPARE)
2790 hash_add(rocker->group_tbl, &found->entry, found->group_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002791
Scott Feldman179f9a22015-06-12 21:35:46 -07002792 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002793
Scott Feldman179f9a22015-06-12 21:35:46 -07002794 return rocker_cmd_exec(rocker_port, trans, flags,
2795 rocker_cmd_group_tbl_add, found, NULL, NULL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002796}
2797
2798static int rocker_group_tbl_del(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07002799 enum switchdev_trans trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002800 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002801{
2802 struct rocker *rocker = rocker_port->rocker;
2803 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002804 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002805 int err = 0;
2806
Scott Feldman179f9a22015-06-12 21:35:46 -07002807 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002808
2809 found = rocker_group_tbl_find(rocker, match);
2810
2811 if (found) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07002812 if (trans != SWITCHDEV_TRANS_PREPARE)
2813 hash_del(&found->entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002814 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2815 }
2816
Scott Feldman179f9a22015-06-12 21:35:46 -07002817 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002818
Simon Horman0985df72015-05-25 14:28:35 +09002819 rocker_group_tbl_entry_free(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002820
2821 if (found) {
Scott Feldman179f9a22015-06-12 21:35:46 -07002822 err = rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002823 rocker_cmd_group_tbl_del,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002824 found, NULL, NULL);
Simon Horman0985df72015-05-25 14:28:35 +09002825 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002826 }
2827
2828 return err;
2829}
2830
2831static int rocker_group_tbl_do(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002832 enum switchdev_trans trans, int flags,
2833 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002834{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002835 if (flags & ROCKER_OP_FLAG_REMOVE)
Scott Feldman179f9a22015-06-12 21:35:46 -07002836 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002837 else
Scott Feldman179f9a22015-06-12 21:35:46 -07002838 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002839}
2840
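/* Group IDs pack the group type plus type-specific fields into a
 * single u32.  As a rough sketch (the authoritative shifts and masks
 * live in rocker.h):
 *
 *   L2 interface: [ type:4 | vlan_id:12 | out_pport:16 ]
 *   L3 unicast:   [ type:4 |          index:28         ]
 *
 * so ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport) and
 * ROCKER_GROUP_L3_UNICAST(index) are shift-and-or constructions, and
 * ROCKER_GROUP_TYPE_GET() recovers the type from the top bits.
 */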
2841static int rocker_group_l2_interface(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002842 enum switchdev_trans trans, int flags,
2843 __be16 vlan_id, u32 out_pport,
2844 int pop_vlan)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002845{
2846 struct rocker_group_tbl_entry *entry;
2847
Scott Feldman179f9a22015-06-12 21:35:46 -07002848 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002849 if (!entry)
2850 return -ENOMEM;
2851
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002852 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002853 entry->l2_interface.pop_vlan = pop_vlan;
2854
Scott Feldmanc4f20322015-05-10 09:47:50 -07002855 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002856}
2857
2858static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002859 enum switchdev_trans trans,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002860 int flags, u8 group_count,
Simon Hormane5054642015-05-25 14:28:36 +09002861 const u32 *group_ids, u32 group_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002862{
2863 struct rocker_group_tbl_entry *entry;
2864
Scott Feldman179f9a22015-06-12 21:35:46 -07002865 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002866 if (!entry)
2867 return -ENOMEM;
2868
2869 entry->group_id = group_id;
2870 entry->group_count = group_count;
2871
Scott Feldman179f9a22015-06-12 21:35:46 -07002872 entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
2873 group_count, sizeof(u32));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002874 if (!entry->group_ids) {
Simon Horman0985df72015-05-25 14:28:35 +09002875 rocker_port_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002876 return -ENOMEM;
2877 }
2878 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2879
Scott Feldmanc4f20322015-05-10 09:47:50 -07002880 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002881}
2882
2883static int rocker_group_l2_flood(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002884 enum switchdev_trans trans, int flags,
2885 __be16 vlan_id, u8 group_count,
Simon Hormane5054642015-05-25 14:28:36 +09002886 const u32 *group_ids, u32 group_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002887{
Scott Feldmanc4f20322015-05-10 09:47:50 -07002888 return rocker_group_l2_fan_out(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002889 group_count, group_ids,
2890 group_id);
2891}
2892
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002893static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002894 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09002895 u32 index, const u8 *src_mac, const u8 *dst_mac,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002896 __be16 vlan_id, bool ttl_check, u32 pport)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002897{
2898 struct rocker_group_tbl_entry *entry;
2899
Scott Feldman179f9a22015-06-12 21:35:46 -07002900 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002901 if (!entry)
2902 return -ENOMEM;
2903
2904 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2905 if (src_mac)
2906 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2907 if (dst_mac)
2908 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2909 entry->l3_unicast.vlan_id = vlan_id;
2910 entry->l3_unicast.ttl_check = ttl_check;
2911 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2912
Scott Feldmanc4f20322015-05-10 09:47:50 -07002913 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002914}
2915
2916static struct rocker_neigh_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002917rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002918{
2919 struct rocker_neigh_tbl_entry *found;
2920
Scott Feldman0f43deb2015-03-06 15:54:51 -08002921 hash_for_each_possible(rocker->neigh_tbl, found,
2922 entry, be32_to_cpu(ip_addr))
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002923 if (found->ip_addr == ip_addr)
2924 return found;
2925
2926 return NULL;
2927}
2928
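/* The neigh table index is assigned in the prepare (or no-transaction)
 * phase.  Memory allocated during prepare is handed back at commit, so
 * the index is already set by then; bumping neigh_tbl_next_index again
 * at commit would leak an index.  Prepare also skips the ref count and
 * hash insertion so the dry run leaves no state behind.
 */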
2929static void _rocker_neigh_add(struct rocker *rocker,
Simon Horman550ecc92015-05-21 12:40:16 +09002930 enum switchdev_trans trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002931 struct rocker_neigh_tbl_entry *entry)
2932{
Scott Feldman4d81db42015-06-12 21:24:40 -07002933 if (trans != SWITCHDEV_TRANS_COMMIT)
2934 entry->index = rocker->neigh_tbl_next_index++;
Simon Horman550ecc92015-05-21 12:40:16 +09002935 if (trans == SWITCHDEV_TRANS_PREPARE)
2936 return;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002937 entry->ref_count++;
Scott Feldman0f43deb2015-03-06 15:54:51 -08002938 hash_add(rocker->neigh_tbl, &entry->entry,
2939 be32_to_cpu(entry->ip_addr));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002940}
2941
Simon Horman0985df72015-05-25 14:28:35 +09002942static void _rocker_neigh_del(enum switchdev_trans trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002943 struct rocker_neigh_tbl_entry *entry)
2944{
Simon Horman550ecc92015-05-21 12:40:16 +09002945 if (trans == SWITCHDEV_TRANS_PREPARE)
2946 return;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002947 if (--entry->ref_count == 0) {
2948 hash_del(&entry->entry);
Simon Horman0985df72015-05-25 14:28:35 +09002949 rocker_port_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002950 }
2951}
2952
Scott Feldmanc4f20322015-05-10 09:47:50 -07002953static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
Simon Horman550ecc92015-05-21 12:40:16 +09002954 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09002955 const u8 *eth_dst, bool ttl_check)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002956{
2957 if (eth_dst) {
2958 ether_addr_copy(entry->eth_dst, eth_dst);
2959 entry->ttl_check = ttl_check;
Simon Horman550ecc92015-05-21 12:40:16 +09002960 } else if (trans != SWITCHDEV_TRANS_PREPARE) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002961 entry->ref_count++;
2962 }
2963}
2964
2965static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002966 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09002967 int flags, __be32 ip_addr, const u8 *eth_dst)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002968{
2969 struct rocker *rocker = rocker_port->rocker;
2970 struct rocker_neigh_tbl_entry *entry;
2971 struct rocker_neigh_tbl_entry *found;
2972 unsigned long lock_flags;
2973 __be16 eth_type = htons(ETH_P_IP);
2974 enum rocker_of_dpa_table_id goto_tbl =
2975 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2976 u32 group_id;
2977 u32 priority = 0;
2978 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2979 bool updating;
2980 bool removing;
2981 int err = 0;
2982
Scott Feldman179f9a22015-06-12 21:35:46 -07002983 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002984 if (!entry)
2985 return -ENOMEM;
2986
2987 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2988
2989 found = rocker_neigh_tbl_find(rocker, ip_addr);
2990
2991 updating = found && adding;
2992 removing = found && !adding;
2993 adding = !found && adding;
2994
2995 if (adding) {
2996 entry->ip_addr = ip_addr;
2997 entry->dev = rocker_port->dev;
2998 ether_addr_copy(entry->eth_dst, eth_dst);
2999 entry->ttl_check = true;
Simon Horman550ecc92015-05-21 12:40:16 +09003000 _rocker_neigh_add(rocker, trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003001 } else if (removing) {
3002 memcpy(entry, found, sizeof(*entry));
Simon Horman0985df72015-05-25 14:28:35 +09003003 _rocker_neigh_del(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003004 } else if (updating) {
Simon Horman550ecc92015-05-21 12:40:16 +09003005 _rocker_neigh_update(found, trans, eth_dst, true);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003006 memcpy(entry, found, sizeof(*entry));
3007 } else {
3008 err = -ENOENT;
3009 }
3010
3011 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3012
3013 if (err)
3014 goto err_out;
3015
3016 /* For each active neighbor, we have an L3 unicast group and
3017 * a /32 route to the neighbor, which uses the L3 unicast
3018 * group. The L3 unicast group can also be referred to by
3019 * other routes' nexthops.
3020 */
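	/* As a hypothetical example: neighbor 10.0.0.2 resolved at
	 * index 7 gets group ROCKER_GROUP_L3_UNICAST(7), which rewrites
	 * eth_src/eth_dst and the VLAN, plus a 10.0.0.2/32
	 * ucast-routing flow pointing at that group.
	 */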
3021
Scott Feldmanc4f20322015-05-10 09:47:50 -07003022 err = rocker_group_l3_unicast(rocker_port, trans, flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003023 entry->index,
3024 rocker_port->dev->dev_addr,
3025 entry->eth_dst,
3026 rocker_port->internal_vlan_id,
3027 entry->ttl_check,
3028 rocker_port->pport);
3029 if (err) {
3030 netdev_err(rocker_port->dev,
3031 "Error (%d) L3 unicast group index %d\n",
3032 err, entry->index);
3033 goto err_out;
3034 }
3035
3036 if (adding || removing) {
3037 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003038 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003039 eth_type, ip_addr,
3040 inet_make_mask(32),
3041 priority, goto_tbl,
3042 group_id, flags);
3043
3044 if (err)
3045 netdev_err(rocker_port->dev,
3046 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3047 err, &entry->ip_addr, group_id);
3048 }
3049
3050err_out:
3051 if (!adding)
Simon Horman0985df72015-05-25 14:28:35 +09003052 rocker_port_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003053
3054 return err;
3055}
3056
3057static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003058 enum switchdev_trans trans, __be32 ip_addr)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003059{
3060 struct net_device *dev = rocker_port->dev;
Scott Feldman0f43deb2015-03-06 15:54:51 -08003061 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003062 int err = 0;
3063
Ying Xue4133fc02015-05-15 12:53:21 +08003064 if (!n) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003065 n = neigh_create(&arp_tbl, &ip_addr, dev);
Ying Xue4133fc02015-05-15 12:53:21 +08003066 if (IS_ERR(n))
3067			return PTR_ERR(n);
3068 }
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003069
3070 /* If the neigh is already resolved, then go ahead and
3071 * install the entry, otherwise start the ARP process to
3072 * resolve the neigh.
3073 */
3074
3075 if (n->nud_state & NUD_VALID)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003076 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
3077 ip_addr, n->ha);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003078 else
3079 neigh_event_send(n, NULL);
3080
Ying Xue4133fc02015-05-15 12:53:21 +08003081 neigh_release(n);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003082 return err;
3083}
3084
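/* Look up (or create) the nexthop entry for ip_addr and return its L3
 * unicast group index in *index.  Entries are refcounted across the
 * routes that use them; a newly created entry starts unresolved, and
 * ARP resolution is kicked off below via rocker_port_ipv4_resolve().
 */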
Scott Feldmanc4f20322015-05-10 09:47:50 -07003085static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
3086 enum switchdev_trans trans, int flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003087 __be32 ip_addr, u32 *index)
3088{
3089 struct rocker *rocker = rocker_port->rocker;
3090 struct rocker_neigh_tbl_entry *entry;
3091 struct rocker_neigh_tbl_entry *found;
3092 unsigned long lock_flags;
3093 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3094 bool updating;
3095 bool removing;
3096 bool resolved = true;
3097 int err = 0;
3098
Scott Feldman179f9a22015-06-12 21:35:46 -07003099 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003100 if (!entry)
3101 return -ENOMEM;
3102
3103 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3104
3105 found = rocker_neigh_tbl_find(rocker, ip_addr);
3106 if (found)
3107 *index = found->index;
3108
3109 updating = found && adding;
3110 removing = found && !adding;
3111 adding = !found && adding;
3112
3113 if (adding) {
3114 entry->ip_addr = ip_addr;
3115 entry->dev = rocker_port->dev;
Simon Horman550ecc92015-05-21 12:40:16 +09003116 _rocker_neigh_add(rocker, trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003117 *index = entry->index;
3118 resolved = false;
3119 } else if (removing) {
Simon Horman0985df72015-05-25 14:28:35 +09003120 _rocker_neigh_del(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003121 } else if (updating) {
Simon Horman550ecc92015-05-21 12:40:16 +09003122 _rocker_neigh_update(found, trans, NULL, false);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003123 resolved = !is_zero_ether_addr(found->eth_dst);
3124 } else {
3125 err = -ENOENT;
3126 }
3127
3128 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3129
3130 if (!adding)
Simon Horman0985df72015-05-25 14:28:35 +09003131 rocker_port_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003132
3133 if (err)
3134 return err;
3135
3136 /* Resolved means neigh ip_addr is resolved to neigh mac. */
3137
3138 if (!resolved)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003139 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003140
3141 return err;
3142}
3143
Scott Feldman6c707942014-11-28 14:34:28 +01003144static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003145 enum switchdev_trans trans,
Scott Feldman6c707942014-11-28 14:34:28 +01003146 int flags, __be16 vlan_id)
3147{
3148 struct rocker_port *p;
Simon Hormane5054642015-05-25 14:28:36 +09003149 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman6c707942014-11-28 14:34:28 +01003150 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
Scott Feldman04f49fa2015-03-15 23:04:46 -07003151 u32 *group_ids;
Scott Feldman6c707942014-11-28 14:34:28 +01003152 u8 group_count = 0;
Scott Feldman04f49fa2015-03-15 23:04:46 -07003153 int err = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01003154 int i;
3155
Scott Feldman179f9a22015-06-12 21:35:46 -07003156 group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
3157 rocker->port_count, sizeof(u32));
Scott Feldman04f49fa2015-03-15 23:04:46 -07003158 if (!group_ids)
3159 return -ENOMEM;
3160
Scott Feldman6c707942014-11-28 14:34:28 +01003161 /* Adjust the flood group for this VLAN. The flood group
3162 * references an L2 interface group for each port in this
3163 * VLAN.
3164 */
3165
3166 for (i = 0; i < rocker->port_count; i++) {
3167 p = rocker->ports[i];
Scott Feldmanbcfd7802015-06-01 11:39:04 -07003168 if (!p)
3169 continue;
Scott Feldman6c707942014-11-28 14:34:28 +01003170 if (!rocker_port_is_bridged(p))
3171 continue;
3172 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3173 group_ids[group_count++] =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003174 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003175 }
3176 }
3177
3178 /* If there are no bridged ports in this VLAN, we're done */
3179 if (group_count == 0)
Scott Feldman04f49fa2015-03-15 23:04:46 -07003180 goto no_ports_in_vlan;
Scott Feldman6c707942014-11-28 14:34:28 +01003181
Scott Feldmanc4f20322015-05-10 09:47:50 -07003182 err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
3183 group_count, group_ids, group_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003184 if (err)
3185 netdev_err(rocker_port->dev,
3186 "Error (%d) port VLAN l2 flood group\n", err);
3187
Scott Feldman04f49fa2015-03-15 23:04:46 -07003188no_ports_in_vlan:
Simon Horman0985df72015-05-25 14:28:35 +09003189 rocker_port_kfree(trans, group_ids);
Scott Feldman6c707942014-11-28 14:34:28 +01003190 return err;
3191}
3192
3193static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003194 enum switchdev_trans trans, int flags,
3195 __be16 vlan_id, bool pop_vlan)
Scott Feldman6c707942014-11-28 14:34:28 +01003196{
Simon Hormane5054642015-05-25 14:28:36 +09003197 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman6c707942014-11-28 14:34:28 +01003198 struct rocker_port *p;
3199 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003200 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003201 int ref = 0;
3202 int err;
3203 int i;
3204
3205 /* An L2 interface group for this port in this VLAN, but
3206 * only when port STP state is LEARNING|FORWARDING.
3207 */
3208
3209 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3210 rocker_port->stp_state == BR_STATE_FORWARDING) {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003211 out_pport = rocker_port->pport;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003212 err = rocker_group_l2_interface(rocker_port, trans, flags,
3213 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003214 if (err) {
3215 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003216 "Error (%d) port VLAN l2 group for pport %d\n",
3217 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003218 return err;
3219 }
3220 }
3221
3222 /* An L2 interface group for this VLAN to CPU port.
3223 * Add when first port joins this VLAN and destroy when
3224 * last port leaves this VLAN.
3225 */
3226
3227 for (i = 0; i < rocker->port_count; i++) {
3228 p = rocker->ports[i];
Scott Feldmanbcfd7802015-06-01 11:39:04 -07003229 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
Scott Feldman6c707942014-11-28 14:34:28 +01003230 ref++;
3231 }
3232
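	/* Only the first port joining or the last port leaving this
	 * VLAN changes the CPU-port group; in every other case it is
	 * already in the right state.
	 */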
3233 if ((!adding || ref != 1) && (adding || ref != 0))
3234 return 0;
3235
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003236 out_pport = 0;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003237 err = rocker_group_l2_interface(rocker_port, trans, flags,
3238 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003239 if (err) {
3240 netdev_err(rocker_port->dev,
3241 "Error (%d) port VLAN l2 group for CPU port\n", err);
3242 return err;
3243 }
3244
3245 return 0;
3246}
3247
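/* Control-plane traffic classes trapped to the CPU.  The bools select
 * which flow table carries the trap, matching the dispatch in
 * rocker_port_ctrl_vlan(): .acl -> ACL policy table, .bridge ->
 * bridging table (via the VLAN's flood group), .term ->
 * termination-MAC table.  .copy_to_cpu additionally mirrors matching
 * pkts to the CPU port.
 */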
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003248static struct rocker_ctrl {
3249 const u8 *eth_dst;
3250 const u8 *eth_dst_mask;
Jiri Pirko11e6c652014-12-03 14:14:54 +01003251 __be16 eth_type;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003252 bool acl;
3253 bool bridge;
3254 bool term;
3255 bool copy_to_cpu;
3256} rocker_ctrls[] = {
3257 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3258 /* pass link local multicast pkts up to CPU for filtering */
3259 .eth_dst = ll_mac,
3260 .eth_dst_mask = ll_mask,
3261 .acl = true,
3262 },
3263 [ROCKER_CTRL_LOCAL_ARP] = {
3264 /* pass local ARP pkts up to CPU */
3265 .eth_dst = zero_mac,
3266 .eth_dst_mask = zero_mac,
3267 .eth_type = htons(ETH_P_ARP),
3268 .acl = true,
3269 },
3270 [ROCKER_CTRL_IPV4_MCAST] = {
3271 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3272 .eth_dst = ipv4_mcast,
3273 .eth_dst_mask = ipv4_mask,
3274 .eth_type = htons(ETH_P_IP),
3275 .term = true,
3276 .copy_to_cpu = true,
3277 },
3278 [ROCKER_CTRL_IPV6_MCAST] = {
3279 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3280 .eth_dst = ipv6_mcast,
3281 .eth_dst_mask = ipv6_mask,
3282 .eth_type = htons(ETH_P_IPV6),
3283 .term = true,
3284 .copy_to_cpu = true,
3285 },
3286 [ROCKER_CTRL_DFLT_BRIDGING] = {
3287 /* flood any pkts on vlan */
3288 .bridge = true,
3289 .copy_to_cpu = true,
3290 },
Simon Horman82549732015-07-16 10:39:14 +09003291 [ROCKER_CTRL_DFLT_OVS] = {
3292 /* pass all pkts up to CPU */
3293 .eth_dst = zero_mac,
3294 .eth_dst_mask = zero_mac,
3295 .acl = true,
3296 },
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003297};
3298
3299static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003300 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003301 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003302{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003303 u32 in_pport = rocker_port->pport;
3304 u32 in_pport_mask = 0xffffffff;
3305 u32 out_pport = 0;
Simon Hormane5054642015-05-25 14:28:36 +09003306 const u8 *eth_src = NULL;
3307 const u8 *eth_src_mask = NULL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003308 __be16 vlan_id_mask = htons(0xffff);
3309 u8 ip_proto = 0;
3310 u8 ip_proto_mask = 0;
3311 u8 ip_tos = 0;
3312 u8 ip_tos_mask = 0;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003313 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003314 int err;
3315
Scott Feldmanc4f20322015-05-10 09:47:50 -07003316 err = rocker_flow_tbl_acl(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003317 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003318 eth_src, eth_src_mask,
3319 ctrl->eth_dst, ctrl->eth_dst_mask,
3320 ctrl->eth_type,
3321 vlan_id, vlan_id_mask,
3322 ip_proto, ip_proto_mask,
3323 ip_tos, ip_tos_mask,
3324 group_id);
3325
3326 if (err)
3327 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3328
3329 return err;
3330}
3331
Scott Feldman6c707942014-11-28 14:34:28 +01003332static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003333 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003334 const struct rocker_ctrl *ctrl,
Scott Feldman6c707942014-11-28 14:34:28 +01003335 __be16 vlan_id)
3336{
3337 enum rocker_of_dpa_table_id goto_tbl =
3338 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3339 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3340 u32 tunnel_id = 0;
3341 int err;
3342
3343 if (!rocker_port_is_bridged(rocker_port))
3344 return 0;
3345
Scott Feldmanc4f20322015-05-10 09:47:50 -07003346 err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003347 ctrl->eth_dst, ctrl->eth_dst_mask,
3348 vlan_id, tunnel_id,
3349 goto_tbl, group_id, ctrl->copy_to_cpu);
3350
3351 if (err)
3352 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3353
3354 return err;
3355}
3356
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003357static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003358 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003359 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003360{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003361 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003362 __be16 vlan_id_mask = htons(0xffff);
3363 int err;
3364
3365 if (ntohs(vlan_id) == 0)
3366 vlan_id = rocker_port->internal_vlan_id;
3367
Scott Feldmanc4f20322015-05-10 09:47:50 -07003368 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003369 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003370 ctrl->eth_type, ctrl->eth_dst,
3371 ctrl->eth_dst_mask, vlan_id,
3372 vlan_id_mask, ctrl->copy_to_cpu,
3373 flags);
3374
3375 if (err)
3376 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3377
3378 return err;
3379}
3380
Scott Feldmanc4f20322015-05-10 09:47:50 -07003381static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3382 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003383 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003384{
3385 if (ctrl->acl)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003386 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003387 ctrl, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003388 if (ctrl->bridge)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003389 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003390 ctrl, vlan_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003391
3392 if (ctrl->term)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003393 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003394 ctrl, vlan_id);
3395
3396 return -EOPNOTSUPP;
3397}
3398
3399static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003400 enum switchdev_trans trans, int flags,
3401 __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003402{
3403 int err = 0;
3404 int i;
3405
3406 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3407 if (rocker_port->ctrls[i]) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003408 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003409 &rocker_ctrls[i], vlan_id);
3410 if (err)
3411 return err;
3412 }
3413 }
3414
3415 return err;
3416}
3417
Scott Feldmanc4f20322015-05-10 09:47:50 -07003418static int rocker_port_ctrl(struct rocker_port *rocker_port,
3419 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003420 const struct rocker_ctrl *ctrl)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003421{
3422 u16 vid;
3423 int err = 0;
3424
3425 for (vid = 1; vid < VLAN_N_VID; vid++) {
3426 if (!test_bit(vid, rocker_port->vlan_bitmap))
3427 continue;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003428 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003429 ctrl, htons(vid));
3430 if (err)
3431 break;
3432 }
3433
3434 return err;
3435}
3436
Scott Feldmanc4f20322015-05-10 09:47:50 -07003437static int rocker_port_vlan(struct rocker_port *rocker_port,
3438 enum switchdev_trans trans, int flags, u16 vid)
Scott Feldman6c707942014-11-28 14:34:28 +01003439{
3440 enum rocker_of_dpa_table_id goto_tbl =
3441 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003442 u32 in_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003443 __be16 vlan_id = htons(vid);
3444 __be16 vlan_id_mask = htons(0xffff);
3445 __be16 internal_vlan_id;
3446 bool untagged;
3447 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3448 int err;
3449
3450 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3451
Scott Feldman9228ad22015-05-10 09:47:54 -07003452 if (adding && test_bit(ntohs(internal_vlan_id),
3453 rocker_port->vlan_bitmap))
Scott Feldman6c707942014-11-28 14:34:28 +01003454 return 0; /* already added */
Scott Feldman9228ad22015-05-10 09:47:54 -07003455 else if (!adding && !test_bit(ntohs(internal_vlan_id),
3456 rocker_port->vlan_bitmap))
Scott Feldman6c707942014-11-28 14:34:28 +01003457 return 0; /* already removed */
3458
Scott Feldman9228ad22015-05-10 09:47:54 -07003459 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3460
Scott Feldman6c707942014-11-28 14:34:28 +01003461 if (adding) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003462 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003463 internal_vlan_id);
3464 if (err) {
3465 netdev_err(rocker_port->dev,
3466 "Error (%d) port ctrl vlan add\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003467 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003468 }
3469 }
3470
Scott Feldmanc4f20322015-05-10 09:47:50 -07003471 err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003472 internal_vlan_id, untagged);
3473 if (err) {
3474 netdev_err(rocker_port->dev,
3475 "Error (%d) port VLAN l2 groups\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003476 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003477 }
3478
Scott Feldmanc4f20322015-05-10 09:47:50 -07003479 err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003480 internal_vlan_id);
3481 if (err) {
3482 netdev_err(rocker_port->dev,
3483 "Error (%d) port VLAN l2 flood group\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003484 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003485 }
3486
Scott Feldmanc4f20322015-05-10 09:47:50 -07003487 err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003488 in_pport, vlan_id, vlan_id_mask,
Scott Feldman6c707942014-11-28 14:34:28 +01003489 goto_tbl, untagged, internal_vlan_id);
3490 if (err)
3491 netdev_err(rocker_port->dev,
3492 "Error (%d) port VLAN table\n", err);
3493
Scott Feldman9228ad22015-05-10 09:47:54 -07003494err_out:
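	/* A prepare-phase dry run must leave no state behind: undo the
	 * vlan_bitmap toggle (on success as well, since commit will
	 * replay the whole operation).
	 */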
3495 if (trans == SWITCHDEV_TRANS_PREPARE)
3496 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3497
Scott Feldman6c707942014-11-28 14:34:28 +01003498 return err;
3499}
3500
Scott Feldmanc4f20322015-05-10 09:47:50 -07003501static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3502 enum switchdev_trans trans, int flags)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003503{
3504 enum rocker_of_dpa_table_id goto_tbl;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003505 u32 in_pport;
3506 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003507 int err;
3508
3509 /* Normal Ethernet Frames. Matches pkts from any local physical
3510 * ports. Goto VLAN tbl.
3511 */
3512
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003513 in_pport = 0;
3514 in_pport_mask = 0xffff0000;
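	/* pport 0 with mask 0xffff0000 leaves the low 16 bits
	 * wildcarded, so this single entry matches every physical port.
	 */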
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003515 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3516
Scott Feldmanc4f20322015-05-10 09:47:50 -07003517 err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003518 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003519 goto_tbl);
3520 if (err)
3521 netdev_err(rocker_port->dev,
3522 "Error (%d) ingress port table entry\n", err);
3523
3524 return err;
3525}
3526
Scott Feldman6c707942014-11-28 14:34:28 +01003527struct rocker_fdb_learn_work {
3528 struct work_struct work;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003529 struct rocker_port *rocker_port;
3530 enum switchdev_trans trans;
Scott Feldman6c707942014-11-28 14:34:28 +01003531 int flags;
3532 u8 addr[ETH_ALEN];
3533 u16 vid;
3534};
3535
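/* MAC learning can be driven from atomic context (the device's event
 * path), while switchdev notifiers may sleep, so the FDB add/del
 * notification is deferred to process context via a work item.  In the
 * prepare phase the work is freed instead of scheduled.
 */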
3536static void rocker_port_fdb_learn_work(struct work_struct *work)
3537{
Simon Hormane5054642015-05-25 14:28:36 +09003538 const struct rocker_fdb_learn_work *lw =
Scott Feldman6c707942014-11-28 14:34:28 +01003539 container_of(work, struct rocker_fdb_learn_work, work);
3540 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3541 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003542 struct switchdev_notifier_fdb_info info;
Jiri Pirko3aeb6612015-01-15 23:49:37 +01003543
3544 info.addr = lw->addr;
3545 info.vid = lw->vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003546
Thomas Graf51ace882014-11-28 14:34:32 +01003547 if (learned && removing)
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003548 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003549 lw->rocker_port->dev, &info.info);
Thomas Graf51ace882014-11-28 14:34:32 +01003550 else if (learned && !removing)
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003551 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003552 lw->rocker_port->dev, &info.info);
Scott Feldman6c707942014-11-28 14:34:28 +01003553
Simon Horman0985df72015-05-25 14:28:35 +09003554 rocker_port_kfree(lw->trans, work);
Scott Feldman6c707942014-11-28 14:34:28 +01003555}
3556
3557static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003558 enum switchdev_trans trans, int flags,
3559 const u8 *addr, __be16 vlan_id)
Scott Feldman6c707942014-11-28 14:34:28 +01003560{
3561 struct rocker_fdb_learn_work *lw;
3562 enum rocker_of_dpa_table_id goto_tbl =
3563 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003564 u32 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003565 u32 tunnel_id = 0;
3566 u32 group_id = ROCKER_GROUP_NONE;
Scott Feldman5111f802014-11-28 14:34:30 +01003567 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
Scott Feldman6c707942014-11-28 14:34:28 +01003568 bool copy_to_cpu = false;
3569 int err;
3570
3571 if (rocker_port_is_bridged(rocker_port))
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003572 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003573
3574 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003575 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
3576 NULL, vlan_id, tunnel_id, goto_tbl,
Scott Feldman6c707942014-11-28 14:34:28 +01003577 group_id, copy_to_cpu);
3578 if (err)
3579 return err;
3580 }
3581
Scott Feldman5111f802014-11-28 14:34:30 +01003582 if (!syncing)
3583 return 0;
3584
Scott Feldman6c707942014-11-28 14:34:28 +01003585 if (!rocker_port_is_bridged(rocker_port))
3586 return 0;
3587
Scott Feldman179f9a22015-06-12 21:35:46 -07003588 lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
Scott Feldman6c707942014-11-28 14:34:28 +01003589 if (!lw)
3590 return -ENOMEM;
3591
3592 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3593
Scott Feldmanc4f20322015-05-10 09:47:50 -07003594 lw->rocker_port = rocker_port;
3595 lw->trans = trans;
Scott Feldman6c707942014-11-28 14:34:28 +01003596 lw->flags = flags;
3597 ether_addr_copy(lw->addr, addr);
3598 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3599
Scott Feldmanc4f20322015-05-10 09:47:50 -07003600 if (trans == SWITCHDEV_TRANS_PREPARE)
Simon Horman0985df72015-05-25 14:28:35 +09003601 rocker_port_kfree(trans, lw);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003602 else
3603 schedule_work(&lw->work);
Scott Feldman6c707942014-11-28 14:34:28 +01003604
3605 return 0;
3606}
3607
3608static struct rocker_fdb_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09003609rocker_fdb_tbl_find(const struct rocker *rocker,
3610 const struct rocker_fdb_tbl_entry *match)
Scott Feldman6c707942014-11-28 14:34:28 +01003611{
3612 struct rocker_fdb_tbl_entry *found;
3613
3614 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3615 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3616 return found;
3617
3618 return NULL;
3619}
3620
3621static int rocker_port_fdb(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003622 enum switchdev_trans trans,
Scott Feldman6c707942014-11-28 14:34:28 +01003623 const unsigned char *addr,
3624 __be16 vlan_id, int flags)
3625{
3626 struct rocker *rocker = rocker_port->rocker;
3627 struct rocker_fdb_tbl_entry *fdb;
3628 struct rocker_fdb_tbl_entry *found;
3629 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3630 unsigned long lock_flags;
3631
Scott Feldman179f9a22015-06-12 21:35:46 -07003632 fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
Scott Feldman6c707942014-11-28 14:34:28 +01003633 if (!fdb)
3634 return -ENOMEM;
3635
3636 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003637 fdb->key.pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003638 ether_addr_copy(fdb->key.addr, addr);
3639 fdb->key.vlan_id = vlan_id;
3640 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3641
3642 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3643
3644 found = rocker_fdb_tbl_find(rocker, fdb);
3645
3646 if (removing && found) {
Simon Horman0985df72015-05-25 14:28:35 +09003647 rocker_port_kfree(trans, fdb);
Simon Horman42e94882015-05-21 12:40:15 +09003648 if (trans != SWITCHDEV_TRANS_PREPARE)
3649 hash_del(&found->entry);
Scott Feldman6c707942014-11-28 14:34:28 +01003650 } else if (!removing && !found) {
Simon Horman42e94882015-05-21 12:40:15 +09003651 if (trans != SWITCHDEV_TRANS_PREPARE)
3652 hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
Scott Feldman6c707942014-11-28 14:34:28 +01003653 }
3654
3655 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3656
3657 /* Check if adding and already exists, or removing and can't find */
3658 if (!found != !removing) {
Simon Horman0985df72015-05-25 14:28:35 +09003659 rocker_port_kfree(trans, fdb);
Scott Feldman6c707942014-11-28 14:34:28 +01003660 if (!found && removing)
3661 return 0;
3662 /* Refreshing existing to update aging timers */
3663 flags |= ROCKER_OP_FLAG_REFRESH;
3664 }
3665
Scott Feldmanc4f20322015-05-10 09:47:50 -07003666 return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003667}
3668
Scott Feldmanc4f20322015-05-10 09:47:50 -07003669static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07003670 enum switchdev_trans trans, int flags)
Scott Feldman6c707942014-11-28 14:34:28 +01003671{
3672 struct rocker *rocker = rocker_port->rocker;
3673 struct rocker_fdb_tbl_entry *found;
3674 unsigned long lock_flags;
Scott Feldman6c707942014-11-28 14:34:28 +01003675 struct hlist_node *tmp;
3676 int bkt;
3677 int err = 0;
3678
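	/* Flushing learned entries only makes sense when the port is
	 * leaving the forwarding states; while LEARNING/FORWARDING the
	 * table stays as is.
	 */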
3679 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3680 rocker_port->stp_state == BR_STATE_FORWARDING)
3681 return 0;
3682
Scott Feldman179f9a22015-06-12 21:35:46 -07003683 flags |= ROCKER_OP_FLAG_REMOVE;
3684
Scott Feldman6c707942014-11-28 14:34:28 +01003685 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3686
3687 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003688 if (found->key.pport != rocker_port->pport)
Scott Feldman6c707942014-11-28 14:34:28 +01003689 continue;
3690 if (!found->learned)
3691 continue;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003692 err = rocker_port_fdb_learn(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003693 found->key.addr,
3694 found->key.vlan_id);
3695 if (err)
3696 goto err_out;
Simon Horman3098ac32015-05-21 12:40:14 +09003697 if (trans != SWITCHDEV_TRANS_PREPARE)
3698 hash_del(&found->entry);
Scott Feldman6c707942014-11-28 14:34:28 +01003699 }
3700
3701err_out:
3702 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3703
3704 return err;
3705}
3706
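/* Install termination-MAC entries for the port's own MAC, one each for
 * IPv4 and IPv6, so routed pkts addressed to the router MAC are steered
 * toward the routing tables instead of being bridged.
 */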
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003707static int rocker_port_router_mac(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003708 enum switchdev_trans trans, int flags,
3709 __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003710{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003711 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003712 __be16 eth_type;
3713 const u8 *dst_mac_mask = ff_mac;
3714 __be16 vlan_id_mask = htons(0xffff);
3715 bool copy_to_cpu = false;
3716 int err;
3717
3718 if (ntohs(vlan_id) == 0)
3719 vlan_id = rocker_port->internal_vlan_id;
3720
3721 eth_type = htons(ETH_P_IP);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003722 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003723 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003724 eth_type, rocker_port->dev->dev_addr,
3725 dst_mac_mask, vlan_id, vlan_id_mask,
3726 copy_to_cpu, flags);
3727 if (err)
3728 return err;
3729
3730 eth_type = htons(ETH_P_IPV6);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003731 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003732 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003733 eth_type, rocker_port->dev->dev_addr,
3734 dst_mac_mask, vlan_id, vlan_id_mask,
3735 copy_to_cpu, flags);
3736
3737 return err;
3738}
3739
Scott Feldmanc4f20322015-05-10 09:47:50 -07003740static int rocker_port_fwding(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07003741 enum switchdev_trans trans, int flags)
Scott Feldman6c707942014-11-28 14:34:28 +01003742{
3743 bool pop_vlan;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003744 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003745 __be16 vlan_id;
3746 u16 vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003747 int err;
3748
3749 /* Port will be forwarding-enabled if its STP state is LEARNING
3750 * or FORWARDING. Traffic from CPU can still egress, regardless of
3751 * port STP state. Use L2 interface group on port VLANs as a way
3752 * to toggle port forwarding: if forwarding is disabled, L2
3753 * interface group will not exist.
3754 */
3755
3756 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3757 rocker_port->stp_state != BR_STATE_FORWARDING)
3758 flags |= ROCKER_OP_FLAG_REMOVE;
3759
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003760 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003761 for (vid = 1; vid < VLAN_N_VID; vid++) {
3762 if (!test_bit(vid, rocker_port->vlan_bitmap))
3763 continue;
3764 vlan_id = htons(vid);
3765 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003766 err = rocker_group_l2_interface(rocker_port, trans, flags,
3767 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003768 if (err) {
3769 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003770 "Error (%d) port VLAN l2 group for pport %d\n",
3771 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003772 return err;
3773 }
3774 }
3775
3776 return 0;
3777}
3778
Scott Feldmanc4f20322015-05-10 09:47:50 -07003779static int rocker_port_stp_update(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07003780 enum switchdev_trans trans, int flags,
3781 u8 state)
Scott Feldman6c707942014-11-28 14:34:28 +01003782{
3783 bool want[ROCKER_CTRL_MAX] = { 0, };
Scott Feldmanc4f20322015-05-10 09:47:50 -07003784 bool prev_ctrls[ROCKER_CTRL_MAX];
3785 u8 prev_state;
Scott Feldman6c707942014-11-28 14:34:28 +01003786 int err;
3787 int i;
3788
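	/* Snapshot software state so a prepare-phase dry run can be
	 * rolled back at err_out.
	 */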
Scott Feldmanc4f20322015-05-10 09:47:50 -07003789 if (trans == SWITCHDEV_TRANS_PREPARE) {
3790 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3791 prev_state = rocker_port->stp_state;
3792 }
3793
Scott Feldman6c707942014-11-28 14:34:28 +01003794 if (rocker_port->stp_state == state)
3795 return 0;
3796
3797 rocker_port->stp_state = state;
3798
3799 switch (state) {
3800 case BR_STATE_DISABLED:
3801 /* port is completely disabled */
3802 break;
3803 case BR_STATE_LISTENING:
3804 case BR_STATE_BLOCKING:
3805 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3806 break;
3807 case BR_STATE_LEARNING:
3808 case BR_STATE_FORWARDING:
Simon Horman82549732015-07-16 10:39:14 +09003809 if (!rocker_port_is_ovsed(rocker_port))
3810 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
Scott Feldman6c707942014-11-28 14:34:28 +01003811 want[ROCKER_CTRL_IPV4_MCAST] = true;
3812 want[ROCKER_CTRL_IPV6_MCAST] = true;
3813 if (rocker_port_is_bridged(rocker_port))
3814 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
Simon Horman82549732015-07-16 10:39:14 +09003815 else if (rocker_port_is_ovsed(rocker_port))
3816 want[ROCKER_CTRL_DFLT_OVS] = true;
Scott Feldman6c707942014-11-28 14:34:28 +01003817 else
3818 want[ROCKER_CTRL_LOCAL_ARP] = true;
3819 break;
3820 }
3821
3822 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3823 if (want[i] != rocker_port->ctrls[i]) {
Scott Feldman179f9a22015-06-12 21:35:46 -07003824 int ctrl_flags = flags |
3825 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3826 err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003827 &rocker_ctrls[i]);
3828 if (err)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003829 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003830 rocker_port->ctrls[i] = want[i];
3831 }
3832 }
3833
Scott Feldman179f9a22015-06-12 21:35:46 -07003834 err = rocker_port_fdb_flush(rocker_port, trans, flags);
Scott Feldman6c707942014-11-28 14:34:28 +01003835 if (err)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003836 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003837
Scott Feldman179f9a22015-06-12 21:35:46 -07003838 err = rocker_port_fwding(rocker_port, trans, flags);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003839
3840err_out:
3841 if (trans == SWITCHDEV_TRANS_PREPARE) {
3842 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3843 rocker_port->stp_state = prev_state;
3844 }
3845
3846 return err;
Scott Feldman6c707942014-11-28 14:34:28 +01003847}
3848
Scott Feldmanc4f20322015-05-10 09:47:50 -07003849static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07003850 enum switchdev_trans trans, int flags)
Scott Feldmane47172a2015-02-25 20:15:38 -08003851{
3852 if (rocker_port_is_bridged(rocker_port))
3853 /* bridge STP will enable port */
3854 return 0;
3855
3856 /* port is not bridged, so simulate going to FORWARDING state */
Scott Feldman179f9a22015-06-12 21:35:46 -07003857 return rocker_port_stp_update(rocker_port, trans, flags,
3858 BR_STATE_FORWARDING);
Scott Feldmane47172a2015-02-25 20:15:38 -08003859}
3860
Scott Feldmanc4f20322015-05-10 09:47:50 -07003861static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07003862 enum switchdev_trans trans, int flags)
Scott Feldmane47172a2015-02-25 20:15:38 -08003863{
3864 if (rocker_port_is_bridged(rocker_port))
3865 /* bridge STP will disable port */
3866 return 0;
3867
3868 /* port is not bridged, so simulate going to DISABLED state */
Scott Feldman179f9a22015-06-12 21:35:46 -07003869 return rocker_port_stp_update(rocker_port, trans, flags,
3870 BR_STATE_DISABLED);
Scott Feldmane47172a2015-02-25 20:15:38 -08003871}
3872
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003873static struct rocker_internal_vlan_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09003874rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003875{
3876 struct rocker_internal_vlan_tbl_entry *found;
3877
3878 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3879 entry, ifindex) {
3880 if (found->ifindex == ifindex)
3881 return found;
3882 }
3883
3884 return NULL;
3885}
3886
3887static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3888 int ifindex)
3889{
3890 struct rocker *rocker = rocker_port->rocker;
3891 struct rocker_internal_vlan_tbl_entry *entry;
3892 struct rocker_internal_vlan_tbl_entry *found;
3893 unsigned long lock_flags;
3894 int i;
3895
Simon Hormandf6a2062015-05-21 12:40:17 +09003896 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003897 if (!entry)
3898 return 0;
3899
3900 entry->ifindex = ifindex;
3901
3902 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3903
3904 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3905 if (found) {
Simon Hormandf6a2062015-05-21 12:40:17 +09003906 kfree(entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003907 goto found;
3908 }
3909
3910 found = entry;
3911 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3912
3913 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3914 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3915 continue;
3916 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3917 goto found;
3918 }
3919
3920 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3921
3922found:
3923 found->ref_count++;
3924 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3925
3926 return found->vlan_id;
3927}
3928
Simon Hormane5054642015-05-25 14:28:36 +09003929static void
3930rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
3931 int ifindex)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003932{
3933 struct rocker *rocker = rocker_port->rocker;
3934 struct rocker_internal_vlan_tbl_entry *found;
3935 unsigned long lock_flags;
3936 unsigned long bit;
3937
3938 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3939
3940 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3941 if (!found) {
3942 netdev_err(rocker_port->dev,
3943 "ifindex (%d) not found in internal VLAN tbl\n",
3944 ifindex);
3945 goto not_found;
3946 }
3947
3948 if (--found->ref_count <= 0) {
3949 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3950 clear_bit(bit, rocker->internal_vlan_bitmap);
3951 hash_del(&found->entry);
Simon Hormandf6a2062015-05-21 12:40:17 +09003952 kfree(found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003953 }
3954
3955not_found:
3956 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3957}
3958
Scott Feldmanc4f20322015-05-10 09:47:50 -07003959static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
3960 enum switchdev_trans trans, __be32 dst,
Simon Hormane5054642015-05-25 14:28:36 +09003961 int dst_len, const struct fib_info *fi,
3962 u32 tb_id, int flags)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003963{
Simon Hormane5054642015-05-25 14:28:36 +09003964 const struct fib_nh *nh;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003965 __be16 eth_type = htons(ETH_P_IP);
3966 __be32 dst_mask = inet_make_mask(dst_len);
3967 __be16 internal_vlan_id = rocker_port->internal_vlan_id;
3968 u32 priority = fi->fib_priority;
3969 enum rocker_of_dpa_table_id goto_tbl =
3970 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3971 u32 group_id;
3972 bool nh_on_port;
3973 bool has_gw;
3974 u32 index;
3975 int err;
3976
3977 /* XXX support ECMP */
3978
3979 nh = fi->fib_nh;
3980 nh_on_port = (fi->fib_dev == rocker_port->dev);
3981 has_gw = !!nh->nh_gw;
3982
3983 if (has_gw && nh_on_port) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003984 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003985 nh->nh_gw, &index);
3986 if (err)
3987 return err;
3988
3989 group_id = ROCKER_GROUP_L3_UNICAST(index);
3990 } else {
3991 /* Send to CPU for processing */
3992 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
3993 }
3994
Scott Feldmanc4f20322015-05-10 09:47:50 -07003995 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003996 dst_mask, priority, goto_tbl,
3997 group_id, flags);
3998 if (err)
3999 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
4000 err, &dst);
4001
4002 return err;
4003}
4004
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004005/*****************
4006 * Net device ops
4007 *****************/
4008
4009static int rocker_port_open(struct net_device *dev)
4010{
4011 struct rocker_port *rocker_port = netdev_priv(dev);
4012 int err;
4013
4014 err = rocker_port_dma_rings_init(rocker_port);
4015 if (err)
4016 return err;
4017
4018 err = request_irq(rocker_msix_tx_vector(rocker_port),
4019 rocker_tx_irq_handler, 0,
4020 rocker_driver_name, rocker_port);
4021 if (err) {
4022 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
4023 goto err_request_tx_irq;
4024 }
4025
4026 err = request_irq(rocker_msix_rx_vector(rocker_port),
4027 rocker_rx_irq_handler, 0,
4028 rocker_driver_name, rocker_port);
4029 if (err) {
4030 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
4031 goto err_request_rx_irq;
4032 }
4033
Scott Feldman179f9a22015-06-12 21:35:46 -07004034 err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01004035 if (err)
Scott Feldmane47172a2015-02-25 20:15:38 -08004036 goto err_fwd_enable;
Scott Feldman6c707942014-11-28 14:34:28 +01004037
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004038 napi_enable(&rocker_port->napi_tx);
4039 napi_enable(&rocker_port->napi_rx);
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004040 if (!dev->proto_down)
4041 rocker_port_set_enable(rocker_port, true);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004042 netif_start_queue(dev);
4043 return 0;
4044
Scott Feldmane47172a2015-02-25 20:15:38 -08004045err_fwd_enable:
Scott Feldman6c707942014-11-28 14:34:28 +01004046 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004047err_request_rx_irq:
4048 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4049err_request_tx_irq:
4050 rocker_port_dma_rings_fini(rocker_port);
4051 return err;
4052}
4053
4054static int rocker_port_stop(struct net_device *dev)
4055{
4056 struct rocker_port *rocker_port = netdev_priv(dev);
4057
4058 netif_stop_queue(dev);
4059 rocker_port_set_enable(rocker_port, false);
4060 napi_disable(&rocker_port->napi_rx);
4061 napi_disable(&rocker_port->napi_tx);
Scott Feldmanf66feaa2015-06-12 21:35:50 -07004062 rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE,
4063 ROCKER_OP_FLAG_NOWAIT);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004064 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4065 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4066 rocker_port_dma_rings_fini(rocker_port);
4067
4068 return 0;
4069}
4070
Simon Hormane5054642015-05-25 14:28:36 +09004071static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4072 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004073{
Simon Hormane5054642015-05-25 14:28:36 +09004074 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004075 struct pci_dev *pdev = rocker->pdev;
Simon Hormane5054642015-05-25 14:28:36 +09004076 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004077 struct rocker_tlv *attr;
4078 int rem;
4079
4080 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4081 if (!attrs[ROCKER_TLV_TX_FRAGS])
4082 return;
4083 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
Simon Hormane5054642015-05-25 14:28:36 +09004084 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004085 dma_addr_t dma_handle;
4086 size_t len;
4087
4088 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4089 continue;
4090 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
4091 attr);
4092 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4093 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4094 continue;
4095 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4096 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4097 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
4098 }
4099}
4100
Simon Hormane5054642015-05-25 14:28:36 +09004101static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004102 struct rocker_desc_info *desc_info,
4103 char *buf, size_t buf_len)
4104{
Simon Hormane5054642015-05-25 14:28:36 +09004105 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004106 struct pci_dev *pdev = rocker->pdev;
4107 dma_addr_t dma_handle;
4108 struct rocker_tlv *frag;
4109
4110 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
4111 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
4112 if (net_ratelimit())
4113 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
4114 return -EIO;
4115 }
4116 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
4117 if (!frag)
4118 goto unmap_frag;
4119 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
4120 dma_handle))
4121 goto nest_cancel;
4122 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
4123 buf_len))
4124 goto nest_cancel;
4125 rocker_tlv_nest_end(desc_info, frag);
4126 return 0;
4127
4128nest_cancel:
4129 rocker_tlv_nest_cancel(desc_info, frag);
4130unmap_frag:
4131 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
4132 return -EMSGSIZE;
4133}
4134
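/* Transmit path: claim the next free tx descriptor, attach the skb as the
 * descriptor cookie, map the linear head plus each page fragment as
 * TX_FRAG TLVs, then post the descriptor.  If no further descriptor is
 * free, the queue is stopped; rocker_port_poll_tx() wakes it once
 * completions drain.  All failure paths drop the skb and return
 * NETDEV_TX_OK so the stack does not requeue it.
 */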
4135static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
4136{
4137 struct rocker_port *rocker_port = netdev_priv(dev);
4138 struct rocker *rocker = rocker_port->rocker;
4139 struct rocker_desc_info *desc_info;
4140 struct rocker_tlv *frags;
4141 int i;
4142 int err;
4143
4144 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4145 if (unlikely(!desc_info)) {
4146 if (net_ratelimit())
4147 netdev_err(dev, "tx ring full when queue awake\n");
4148 return NETDEV_TX_BUSY;
4149 }
4150
4151 rocker_desc_cookie_ptr_set(desc_info, skb);
4152
4153 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
4154 if (!frags)
4155 goto out;
4156 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4157 skb->data, skb_headlen(skb));
4158 if (err)
4159 goto nest_cancel;
4160 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
4161 goto unmap_frags; /* head frag is already mapped */
4162
4163 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4164 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4165
4166 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4167 skb_frag_address(frag),
4168 skb_frag_size(frag));
4169 if (err)
4170 goto unmap_frags;
4171 }
4172 rocker_tlv_nest_end(desc_info, frags);
4173
4174 rocker_desc_gen_clear(desc_info);
4175 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
4176
4177 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4178 if (!desc_info)
4179 netif_stop_queue(dev);
4180
4181 return NETDEV_TX_OK;
4182
4183unmap_frags:
4184 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4185nest_cancel:
4186 rocker_tlv_nest_cancel(desc_info, frags);
4187out:
4188 dev_kfree_skb(skb);
David Ahernf2bbca52015-01-16 14:22:29 -07004189 dev->stats.tx_dropped++;
4190
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004191 return NETDEV_TX_OK;
4192}
4193
4194static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4195{
4196 struct sockaddr *addr = p;
4197 struct rocker_port *rocker_port = netdev_priv(dev);
4198 int err;
4199
4200 if (!is_valid_ether_addr(addr->sa_data))
4201 return -EADDRNOTAVAIL;
4202
4203 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4204 if (err)
4205 return err;
4206 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4207 return 0;
4208}
4209
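/* The port is stopped and re-opened around the hardware MTU update,
 * presumably so the rx DMA buffers are rebuilt for the new size (the
 * restart itself is all the code guarantees).  From userspace this is
 * driven by e.g. (port name hypothetical):
 *
 *   ip link set dev sw1p1 mtu 9000
 */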
Scott Feldman77a58c72015-07-08 16:06:47 -07004210static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4211{
4212 struct rocker_port *rocker_port = netdev_priv(dev);
4213 int running = netif_running(dev);
4214 int err;
4215
4216#define ROCKER_PORT_MIN_MTU 68
4217#define ROCKER_PORT_MAX_MTU 9000
4218
4219 if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4220 return -EINVAL;
4221
4222 if (running)
4223 rocker_port_stop(dev);
4224
4225 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4226 dev->mtu = new_mtu;
4227
4228 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4229 if (err)
4230 return err;
4231
4232 if (running)
4233 err = rocker_port_open(dev);
4234
4235 return err;
4236}
4237
David Aherndb191702015-03-17 20:23:16 -06004238static int rocker_port_get_phys_port_name(struct net_device *dev,
4239 char *buf, size_t len)
4240{
4241 struct rocker_port *rocker_port = netdev_priv(dev);
4242 struct port_name name = { .buf = buf, .len = len };
4243 int err;
4244
Scott Feldman179f9a22015-06-12 21:35:46 -07004245 err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
David Aherndb191702015-03-17 20:23:16 -06004246 rocker_cmd_get_port_settings_prep, NULL,
4247 rocker_cmd_get_port_settings_phys_name_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07004248 &name);
David Aherndb191702015-03-17 20:23:16 -06004249
4250 return err ? -EOPNOTSUPP : 0;
4251}
4252
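/* proto_down disables the port in hardware while leaving the netdev
 * administratively up, e.g. (port name hypothetical):
 *
 *   ip link set dev sw1p1 protodown on
 */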
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004253static int rocker_port_change_proto_down(struct net_device *dev,
4254 bool proto_down)
4255{
4256 struct rocker_port *rocker_port = netdev_priv(dev);
4257
4258 if (rocker_port->dev->flags & IFF_UP)
4259 rocker_port_set_enable(rocker_port, !proto_down);
4260 rocker_port->dev->proto_down = proto_down;
4261 return 0;
4262}
4263
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004264static const struct net_device_ops rocker_port_netdev_ops = {
4265 .ndo_open = rocker_port_open,
4266 .ndo_stop = rocker_port_stop,
4267 .ndo_start_xmit = rocker_port_xmit,
4268 .ndo_set_mac_address = rocker_port_set_mac_address,
Scott Feldman77a58c72015-07-08 16:06:47 -07004269 .ndo_change_mtu = rocker_port_change_mtu,
Scott Feldman85fdb952015-05-10 09:48:05 -07004270 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
Scott Feldmanfc8f40d2015-05-10 09:47:57 -07004271 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
Scott Feldman54ba5a02015-05-10 09:48:01 -07004272 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004273 .ndo_fdb_add = switchdev_port_fdb_add,
4274 .ndo_fdb_del = switchdev_port_fdb_del,
4275 .ndo_fdb_dump = switchdev_port_fdb_dump,
David Aherndb191702015-03-17 20:23:16 -06004276 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004277 .ndo_change_proto_down = rocker_port_change_proto_down,
Scott Feldman98237d42015-03-15 21:07:15 -07004278};
4279
4280/********************
 4281 * switchdev interface
4282 ********************/
4283
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004284static int rocker_port_attr_get(struct net_device *dev,
4285 struct switchdev_attr *attr)
Scott Feldman98237d42015-03-15 21:07:15 -07004286{
Simon Hormane5054642015-05-25 14:28:36 +09004287 const struct rocker_port *rocker_port = netdev_priv(dev);
4288 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman98237d42015-03-15 21:07:15 -07004289
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004290 switch (attr->id) {
4291 case SWITCHDEV_ATTR_PORT_PARENT_ID:
Scott Feldman42275bd2015-05-13 11:16:50 -07004292 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4293 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004294 break;
Scott Feldman6004c862015-05-10 09:47:55 -07004295 case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
Scott Feldman42275bd2015-05-13 11:16:50 -07004296 attr->u.brport_flags = rocker_port->brport_flags;
Scott Feldman6004c862015-05-10 09:47:55 -07004297 break;
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004298 default:
4299 return -EOPNOTSUPP;
4300 }
4301
Scott Feldman98237d42015-03-15 21:07:15 -07004302 return 0;
4303}
4304
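/* switchdev attr/obj updates arrive as a two-phase transaction: a PREPARE
 * pass that must preallocate everything it needs (queued on
 * rocker_port->trans_mem) and a COMMIT pass that may not fail.  On ABORT,
 * anything queued during PREPARE is simply freed here.
 */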
Simon Hormane5054642015-05-25 14:28:36 +09004305static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
Scott Feldmanc4f20322015-05-10 09:47:50 -07004306{
4307 struct list_head *mem, *tmp;
4308
4309 list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
4310 list_del(mem);
4311 kfree(mem);
4312 }
4313}
4314
Scott Feldman6004c862015-05-10 09:47:55 -07004315static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
4316 enum switchdev_trans trans,
4317 unsigned long brport_flags)
4318{
4319 unsigned long orig_flags;
4320 int err = 0;
4321
4322 orig_flags = rocker_port->brport_flags;
4323 rocker_port->brport_flags = brport_flags;
4324 if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
4325 err = rocker_port_set_learning(rocker_port, trans);
4326
4327 if (trans == SWITCHDEV_TRANS_PREPARE)
4328 rocker_port->brport_flags = orig_flags;
4329
4330 return err;
4331}
4332
Scott Feldmanc4f20322015-05-10 09:47:50 -07004333static int rocker_port_attr_set(struct net_device *dev,
4334 struct switchdev_attr *attr)
4335{
4336 struct rocker_port *rocker_port = netdev_priv(dev);
4337 int err = 0;
4338
4339 switch (attr->trans) {
4340 case SWITCHDEV_TRANS_PREPARE:
4341 BUG_ON(!list_empty(&rocker_port->trans_mem));
4342 break;
4343 case SWITCHDEV_TRANS_ABORT:
4344 rocker_port_trans_abort(rocker_port);
4345 return 0;
4346 default:
4347 break;
4348 }
4349
4350 switch (attr->id) {
Scott Feldman35636062015-05-10 09:47:51 -07004351 case SWITCHDEV_ATTR_PORT_STP_STATE:
Scott Feldmanac283932015-06-12 21:35:48 -07004352 err = rocker_port_stp_update(rocker_port, attr->trans,
4353 ROCKER_OP_FLAG_NOWAIT,
Scott Feldman42275bd2015-05-13 11:16:50 -07004354 attr->u.stp_state);
Scott Feldman35636062015-05-10 09:47:51 -07004355 break;
Scott Feldman6004c862015-05-10 09:47:55 -07004356 case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
4357 err = rocker_port_brport_flags_set(rocker_port, attr->trans,
Scott Feldman42275bd2015-05-13 11:16:50 -07004358 attr->u.brport_flags);
Scott Feldman6004c862015-05-10 09:47:55 -07004359 break;
Scott Feldmanc4f20322015-05-10 09:47:50 -07004360 default:
4361 err = -EOPNOTSUPP;
4362 break;
4363 }
4364
4365 return err;
Scott Feldman98237d42015-03-15 21:07:15 -07004366}
4367
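/* Adding a VLAN is a two-step operation: a VLAN table entry plus a router
 * (term) MAC entry so traffic to be routed on that VLAN is recognised.
 * If the second step fails, the first is rolled back.
 */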
Scott Feldman9228ad22015-05-10 09:47:54 -07004368static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4369 enum switchdev_trans trans, u16 vid, u16 flags)
4370{
4371 int err;
4372
4373 /* XXX deal with flags for PVID and untagged */
4374
4375 err = rocker_port_vlan(rocker_port, trans, 0, vid);
4376 if (err)
4377 return err;
4378
Scott Feldmancec04a62015-06-01 11:39:03 -07004379 err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
4380 if (err)
4381 rocker_port_vlan(rocker_port, trans,
4382 ROCKER_OP_FLAG_REMOVE, vid);
4383
4384 return err;
Scott Feldman9228ad22015-05-10 09:47:54 -07004385}
4386
4387static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4388 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09004389 const struct switchdev_obj_vlan *vlan)
Scott Feldman9228ad22015-05-10 09:47:54 -07004390{
4391 u16 vid;
4392 int err;
4393
Scott Feldman3e3a78b2015-06-22 00:27:16 -07004394 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
Scott Feldman9228ad22015-05-10 09:47:54 -07004395 err = rocker_port_vlan_add(rocker_port, trans,
4396 vid, vlan->flags);
4397 if (err)
4398 return err;
4399 }
4400
4401 return 0;
4402}
4403
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004404static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4405 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09004406 const struct switchdev_obj_fdb *fdb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004407{
4408 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4409 int flags = 0;
4410
4411 if (!rocker_port_is_bridged(rocker_port))
4412 return -EINVAL;
4413
4414 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4415}
4416
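/* Object add/del dispatch on obj->id: port VLANs, IPv4 FIB entries and
 * FDB entries all funnel through here, with the same PREPARE/ABORT
 * transaction handling as attr_set above.
 */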
Scott Feldman9228ad22015-05-10 09:47:54 -07004417static int rocker_port_obj_add(struct net_device *dev,
4418 struct switchdev_obj *obj)
4419{
4420 struct rocker_port *rocker_port = netdev_priv(dev);
Simon Hormane5054642015-05-25 14:28:36 +09004421 const struct switchdev_obj_ipv4_fib *fib4;
Scott Feldman9228ad22015-05-10 09:47:54 -07004422 int err = 0;
4423
4424 switch (obj->trans) {
4425 case SWITCHDEV_TRANS_PREPARE:
4426 BUG_ON(!list_empty(&rocker_port->trans_mem));
4427 break;
4428 case SWITCHDEV_TRANS_ABORT:
4429 rocker_port_trans_abort(rocker_port);
4430 return 0;
4431 default:
4432 break;
4433 }
4434
4435 switch (obj->id) {
4436 case SWITCHDEV_OBJ_PORT_VLAN:
4437 err = rocker_port_vlans_add(rocker_port, obj->trans,
Scott Feldman42275bd2015-05-13 11:16:50 -07004438 &obj->u.vlan);
Scott Feldman9228ad22015-05-10 09:47:54 -07004439 break;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004440 case SWITCHDEV_OBJ_IPV4_FIB:
Scott Feldman42275bd2015-05-13 11:16:50 -07004441 fib4 = &obj->u.ipv4_fib;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004442 err = rocker_port_fib_ipv4(rocker_port, obj->trans,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004443 htonl(fib4->dst), fib4->dst_len,
Scott Feldman58c2cb12015-05-10 09:48:06 -07004444 fib4->fi, fib4->tb_id, 0);
4445 break;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004446 case SWITCHDEV_OBJ_PORT_FDB:
4447 err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
4448 break;
Scott Feldman9228ad22015-05-10 09:47:54 -07004449 default:
4450 err = -EOPNOTSUPP;
4451 break;
4452 }
4453
4454 return err;
4455}
4456
4457static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4458 u16 vid, u16 flags)
4459{
4460 int err;
4461
4462 err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
4463 ROCKER_OP_FLAG_REMOVE, htons(vid));
4464 if (err)
4465 return err;
4466
4467 return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
4468 ROCKER_OP_FLAG_REMOVE, vid);
4469}
4470
4471static int rocker_port_vlans_del(struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09004472 const struct switchdev_obj_vlan *vlan)
Scott Feldman9228ad22015-05-10 09:47:54 -07004473{
4474 u16 vid;
4475 int err;
4476
Scott Feldman3e3a78b2015-06-22 00:27:16 -07004477 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
Scott Feldman9228ad22015-05-10 09:47:54 -07004478 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4479 if (err)
4480 return err;
4481 }
4482
4483 return 0;
4484}
4485
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004486static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4487 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09004488 const struct switchdev_obj_fdb *fdb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004489{
4490 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
Scott Feldmanb4ad7ba2015-06-14 11:33:11 -07004491 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004492
4493 if (!rocker_port_is_bridged(rocker_port))
4494 return -EINVAL;
4495
4496 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4497}
4498
Scott Feldman9228ad22015-05-10 09:47:54 -07004499static int rocker_port_obj_del(struct net_device *dev,
4500 struct switchdev_obj *obj)
4501{
4502 struct rocker_port *rocker_port = netdev_priv(dev);
Simon Hormane5054642015-05-25 14:28:36 +09004503 const struct switchdev_obj_ipv4_fib *fib4;
Scott Feldman9228ad22015-05-10 09:47:54 -07004504 int err = 0;
4505
4506 switch (obj->id) {
4507 case SWITCHDEV_OBJ_PORT_VLAN:
Scott Feldman42275bd2015-05-13 11:16:50 -07004508 err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
Scott Feldman9228ad22015-05-10 09:47:54 -07004509 break;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004510 case SWITCHDEV_OBJ_IPV4_FIB:
Scott Feldman42275bd2015-05-13 11:16:50 -07004511 fib4 = &obj->u.ipv4_fib;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004512 err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004513 htonl(fib4->dst), fib4->dst_len,
4514 fib4->fi, fib4->tb_id,
4515 ROCKER_OP_FLAG_REMOVE);
Scott Feldman58c2cb12015-05-10 09:48:06 -07004516 break;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004517 case SWITCHDEV_OBJ_PORT_FDB:
4518 err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
4519 break;
4520 default:
4521 err = -EOPNOTSUPP;
4522 break;
4523 }
4524
4525 return err;
4526}
4527
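/* Dump helpers iterate driver state and invoke obj->cb once per entry;
 * the FDB walk holds fdb_tbl_lock and skips entries belonging to other
 * ports.
 */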
Simon Hormane5054642015-05-25 14:28:36 +09004528static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004529 struct switchdev_obj *obj)
4530{
4531 struct rocker *rocker = rocker_port->rocker;
4532 struct switchdev_obj_fdb *fdb = &obj->u.fdb;
4533 struct rocker_fdb_tbl_entry *found;
4534 struct hlist_node *tmp;
4535 unsigned long lock_flags;
4536 int bkt;
4537 int err = 0;
4538
4539 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4540 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
4541 if (found->key.pport != rocker_port->pport)
4542 continue;
4543 fdb->addr = found->key.addr;
4544 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4545 found->key.vlan_id);
4546 err = obj->cb(rocker_port->dev, obj);
4547 if (err)
4548 break;
4549 }
4550 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4551
4552 return err;
4553}
4554
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004555static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4556 struct switchdev_obj *obj)
4557{
4558 struct switchdev_obj_vlan *vlan = &obj->u.vlan;
4559 u16 vid;
4560 int err = 0;
4561
4562 for (vid = 1; vid < VLAN_N_VID; vid++) {
4563 if (!test_bit(vid, rocker_port->vlan_bitmap))
4564 continue;
4565 vlan->flags = 0;
4566 if (rocker_vlan_id_is_internal(htons(vid)))
4567 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4568 vlan->vid_begin = vlan->vid_end = vid;
4569 err = obj->cb(rocker_port->dev, obj);
4570 if (err)
4571 break;
4572 }
4573
4574 return err;
4575}
4576
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004577static int rocker_port_obj_dump(struct net_device *dev,
4578 struct switchdev_obj *obj)
4579{
Simon Hormane5054642015-05-25 14:28:36 +09004580 const struct rocker_port *rocker_port = netdev_priv(dev);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004581 int err = 0;
4582
4583 switch (obj->id) {
4584 case SWITCHDEV_OBJ_PORT_FDB:
4585 err = rocker_port_fdb_dump(rocker_port, obj);
4586 break;
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004587 case SWITCHDEV_OBJ_PORT_VLAN:
4588 err = rocker_port_vlan_dump(rocker_port, obj);
4589 break;
Scott Feldman9228ad22015-05-10 09:47:54 -07004590 default:
4591 err = -EOPNOTSUPP;
4592 break;
4593 }
4594
4595 return err;
4596}
4597
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07004598static const struct switchdev_ops rocker_port_switchdev_ops = {
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004599 .switchdev_port_attr_get = rocker_port_attr_get,
Scott Feldman35636062015-05-10 09:47:51 -07004600 .switchdev_port_attr_set = rocker_port_attr_set,
Scott Feldman9228ad22015-05-10 09:47:54 -07004601 .switchdev_port_obj_add = rocker_port_obj_add,
4602 .switchdev_port_obj_del = rocker_port_obj_del,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004603 .switchdev_port_obj_dump = rocker_port_obj_dump,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004604};
4605
4606/********************
4607 * ethtool interface
4608 ********************/
4609
4610static int rocker_port_get_settings(struct net_device *dev,
4611 struct ethtool_cmd *ecmd)
4612{
4613 struct rocker_port *rocker_port = netdev_priv(dev);
4614
4615 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4616}
4617
4618static int rocker_port_set_settings(struct net_device *dev,
4619 struct ethtool_cmd *ecmd)
4620{
4621 struct rocker_port *rocker_port = netdev_priv(dev);
4622
4623 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4624}
4625
4626static void rocker_port_get_drvinfo(struct net_device *dev,
4627 struct ethtool_drvinfo *drvinfo)
4628{
4629 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4630 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4631}
4632
David Ahern9766e972015-01-29 20:59:33 -07004633static struct rocker_port_stats {
4634 char str[ETH_GSTRING_LEN];
4635 int type;
4636} rocker_port_stats[] = {
4637 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4638 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4639 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4640 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4641
4642 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4643 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4644 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4645 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
4646};
4647
4648#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
4649
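/* The get_strings/get_sset_count/get_ethtool_stats trio exposes the table
 * above to ethtool, e.g. (port name hypothetical):
 *
 *   ethtool -S sw1p1
 */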
4650static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4651 u8 *data)
4652{
4653 u8 *p = data;
4654 int i;
4655
4656 switch (stringset) {
4657 case ETH_SS_STATS:
4658 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4659 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4660 p += ETH_GSTRING_LEN;
4661 }
4662 break;
4663 }
4664}
4665
4666static int
Simon Horman534ba6a2015-06-01 13:25:04 +09004667rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
David Ahern9766e972015-01-29 20:59:33 -07004668 struct rocker_desc_info *desc_info,
4669 void *priv)
4670{
4671 struct rocker_tlv *cmd_stats;
4672
4673 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4674 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4675 return -EMSGSIZE;
4676
4677 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4678 if (!cmd_stats)
4679 return -EMSGSIZE;
4680
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004681 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4682 rocker_port->pport))
David Ahern9766e972015-01-29 20:59:33 -07004683 return -EMSGSIZE;
4684
4685 rocker_tlv_nest_end(desc_info, cmd_stats);
4686
4687 return 0;
4688}
4689
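/* Port stats are fetched over the command ring as a prep/proc pair: the
 * prep callback above builds the GET_PORT_STATS request TLVs, and the proc
 * callback below parses the reply, checks it is for this pport, and copies
 * each counter into the ethtool data array.
 */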
4690static int
Simon Horman534ba6a2015-06-01 13:25:04 +09004691rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09004692 const struct rocker_desc_info *desc_info,
David Ahern9766e972015-01-29 20:59:33 -07004693 void *priv)
4694{
Simon Hormane5054642015-05-25 14:28:36 +09004695 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4696 const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4697 const struct rocker_tlv *pattr;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004698 u32 pport;
David Ahern9766e972015-01-29 20:59:33 -07004699 u64 *data = priv;
4700 int i;
4701
4702 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4703
4704 if (!attrs[ROCKER_TLV_CMD_INFO])
4705 return -EIO;
4706
4707 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4708 attrs[ROCKER_TLV_CMD_INFO]);
4709
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004710 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
David Ahern9766e972015-01-29 20:59:33 -07004711 return -EIO;
4712
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004713 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4714 if (pport != rocker_port->pport)
David Ahern9766e972015-01-29 20:59:33 -07004715 return -EIO;
4716
4717 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4718 pattr = stats_attrs[rocker_port_stats[i].type];
4719 if (!pattr)
4720 continue;
4721
4722 data[i] = rocker_tlv_get_u64(pattr);
4723 }
4724
4725 return 0;
4726}
4727
4728static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4729 void *priv)
4730{
Scott Feldman179f9a22015-06-12 21:35:46 -07004731 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
David Ahern9766e972015-01-29 20:59:33 -07004732 rocker_cmd_get_port_stats_prep, NULL,
4733 rocker_cmd_get_port_stats_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07004734 priv);
David Ahern9766e972015-01-29 20:59:33 -07004735}
4736
4737static void rocker_port_get_stats(struct net_device *dev,
4738 struct ethtool_stats *stats, u64 *data)
4739{
4740 struct rocker_port *rocker_port = netdev_priv(dev);
4741
4742 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4743 int i;
4744
4745 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4746 data[i] = 0;
4747 }
David Ahern9766e972015-01-29 20:59:33 -07004748}
4749
4750static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4751{
4752 switch (sset) {
4753 case ETH_SS_STATS:
4754 return ROCKER_PORT_STATS_LEN;
4755 default:
4756 return -EOPNOTSUPP;
4757 }
4758}
4759
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004760static const struct ethtool_ops rocker_port_ethtool_ops = {
4761 .get_settings = rocker_port_get_settings,
4762 .set_settings = rocker_port_set_settings,
4763 .get_drvinfo = rocker_port_get_drvinfo,
4764 .get_link = ethtool_op_get_link,
David Ahern9766e972015-01-29 20:59:33 -07004765 .get_strings = rocker_port_get_strings,
4766 .get_ethtool_stats = rocker_port_get_stats,
4767 .get_sset_count = rocker_port_get_sset_count,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004768};
4769
4770/*****************
4771 * NAPI interface
4772 *****************/
4773
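/* Two NAPI contexts per port: the tx poll reclaims completed descriptors,
 * unmaps their fragments and returns credits to the ring; the rx poll is
 * bounded by the NAPI budget and refills each consumed descriptor with a
 * fresh skb.
 */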
4774static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4775{
4776 return container_of(napi, struct rocker_port, napi_tx);
4777}
4778
4779static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4780{
4781 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
Simon Hormane5054642015-05-25 14:28:36 +09004782 const struct rocker *rocker = rocker_port->rocker;
4783 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004784 u32 credits = 0;
4785 int err;
4786
4787 /* Cleanup tx descriptors */
4788 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
David Ahernf2bbca52015-01-16 14:22:29 -07004789 struct sk_buff *skb;
4790
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004791 err = rocker_desc_err(desc_info);
4792 if (err && net_ratelimit())
4793 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4794 err);
4795 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
David Ahernf2bbca52015-01-16 14:22:29 -07004796
4797 skb = rocker_desc_cookie_ptr_get(desc_info);
4798 if (err == 0) {
4799 rocker_port->dev->stats.tx_packets++;
4800 rocker_port->dev->stats.tx_bytes += skb->len;
Scott Feldman4725ceb2015-05-10 09:48:08 -07004801 } else {
David Ahernf2bbca52015-01-16 14:22:29 -07004802 rocker_port->dev->stats.tx_errors++;
Scott Feldman4725ceb2015-05-10 09:48:08 -07004803 }
David Ahernf2bbca52015-01-16 14:22:29 -07004804
4805 dev_kfree_skb_any(skb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004806 credits++;
4807 }
4808
4809 if (credits && netif_queue_stopped(rocker_port->dev))
4810 netif_wake_queue(rocker_port->dev);
4811
4812 napi_complete(napi);
4813 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4814
4815 return 0;
4816}
4817
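/* Each rx descriptor's cookie is a pre-mapped skb.  On completion: unmap,
 * set the skb length from the FRAG_LEN reported by hardware, hand the skb
 * to the stack, and allocate a replacement skb for the descriptor.
 */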
Simon Hormane5054642015-05-25 14:28:36 +09004818static int rocker_port_rx_proc(const struct rocker *rocker,
4819 const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004820 struct rocker_desc_info *desc_info)
4821{
Simon Hormane5054642015-05-25 14:28:36 +09004822 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004823 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4824 size_t rx_len;
4825
4826 if (!skb)
4827 return -ENOENT;
4828
4829 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4830 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4831 return -EINVAL;
4832
4833 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4834
4835 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4836 skb_put(skb, rx_len);
4837 skb->protocol = eth_type_trans(skb, rocker_port->dev);
David Ahernf2bbca52015-01-16 14:22:29 -07004838
4839 rocker_port->dev->stats.rx_packets++;
4840 rocker_port->dev->stats.rx_bytes += skb->len;
4841
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004842 netif_receive_skb(skb);
4843
Simon Horman534ba6a2015-06-01 13:25:04 +09004844 return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004845}
4846
4847static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4848{
4849 return container_of(napi, struct rocker_port, napi_rx);
4850}
4851
4852static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4853{
4854 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
Simon Hormane5054642015-05-25 14:28:36 +09004855 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004856 struct rocker_desc_info *desc_info;
4857 u32 credits = 0;
4858 int err;
4859
4860 /* Process rx descriptors */
4861 while (credits < budget &&
4862 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4863 err = rocker_desc_err(desc_info);
4864 if (err) {
4865 if (net_ratelimit())
4866 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4867 err);
4868 } else {
4869 err = rocker_port_rx_proc(rocker, rocker_port,
4870 desc_info);
4871 if (err && net_ratelimit())
4872 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4873 err);
4874 }
David Ahernf2bbca52015-01-16 14:22:29 -07004875 if (err)
4876 rocker_port->dev->stats.rx_errors++;
4877
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004878 rocker_desc_gen_clear(desc_info);
4879 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4880 credits++;
4881 }
4882
4883 if (credits < budget)
4884 napi_complete(napi);
4885
4886 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
4887
4888 return credits;
4889}
4890
4891/*****************
4892 * PCI driver ops
4893 *****************/
4894
Simon Hormane5054642015-05-25 14:28:36 +09004895static void rocker_carrier_init(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004896{
Simon Hormane5054642015-05-25 14:28:36 +09004897 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004898 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4899 bool link_up;
4900
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004901 link_up = link_status & (1 << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004902 if (link_up)
4903 netif_carrier_on(rocker_port->dev);
4904 else
4905 netif_carrier_off(rocker_port->dev);
4906}
4907
Simon Hormane5054642015-05-25 14:28:36 +09004908static void rocker_remove_ports(const struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004909{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004910 struct rocker_port *rocker_port;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004911 int i;
4912
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004913 for (i = 0; i < rocker->port_count; i++) {
4914 rocker_port = rocker->ports[i];
Scott Feldmana0720312015-06-12 21:09:44 -07004915 if (!rocker_port)
4916 continue;
Scott Feldmanc4f20322015-05-10 09:47:50 -07004917 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
4918 ROCKER_OP_FLAG_REMOVE);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004919 unregister_netdev(rocker_port->dev);
4920 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004921 kfree(rocker->ports);
4922}
4923
Simon Horman534ba6a2015-06-01 13:25:04 +09004924static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004925{
Simon Horman534ba6a2015-06-01 13:25:04 +09004926 const struct rocker *rocker = rocker_port->rocker;
Simon Hormane5054642015-05-25 14:28:36 +09004927 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004928 int err;
4929
4930 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4931 rocker_port->dev->dev_addr);
4932 if (err) {
4933 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4934 eth_hw_addr_random(rocker_port->dev);
4935 }
4936}
4937
4938static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4939{
Simon Hormane5054642015-05-25 14:28:36 +09004940 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004941 struct rocker_port *rocker_port;
4942 struct net_device *dev;
Scott Feldmanbcfd7802015-06-01 11:39:04 -07004943 u16 untagged_vid = 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004944 int err;
4945
4946 dev = alloc_etherdev(sizeof(struct rocker_port));
4947 if (!dev)
4948 return -ENOMEM;
4949 rocker_port = netdev_priv(dev);
4950 rocker_port->dev = dev;
4951 rocker_port->rocker = rocker;
4952 rocker_port->port_number = port_number;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004953 rocker_port->pport = port_number + 1;
Scott Feldman5111f802014-11-28 14:34:30 +01004954 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
Scott Feldmanc4f20322015-05-10 09:47:50 -07004955 INIT_LIST_HEAD(&rocker_port->trans_mem);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004956
Simon Horman534ba6a2015-06-01 13:25:04 +09004957 rocker_port_dev_addr_init(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004958 dev->netdev_ops = &rocker_port_netdev_ops;
4959 dev->ethtool_ops = &rocker_port_ethtool_ops;
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07004960 dev->switchdev_ops = &rocker_port_switchdev_ops;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004961 netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
4962 NAPI_POLL_WEIGHT);
4963 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
4964 NAPI_POLL_WEIGHT);
4965 rocker_carrier_init(rocker_port);
4966
Scott Feldman2aa2ed02015-06-01 11:39:06 -07004967 dev->features |= NETIF_F_NETNS_LOCAL;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004968
4969 err = register_netdev(dev);
4970 if (err) {
4971 dev_err(&pdev->dev, "register_netdev failed\n");
4972 goto err_register_netdev;
4973 }
4974 rocker->ports[port_number] = rocker_port;
4975
Scott Feldmanc4f20322015-05-10 09:47:50 -07004976 rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
Scott Feldman5111f802014-11-28 14:34:30 +01004977
Scott Feldmanc4f20322015-05-10 09:47:50 -07004978 err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004979 if (err) {
4980 dev_err(&pdev->dev, "install ig port table failed\n");
4981 goto err_port_ig_tbl;
4982 }
4983
Scott Feldmanbcfd7802015-06-01 11:39:04 -07004984 rocker_port->internal_vlan_id =
4985 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
4986
4987 err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
4988 untagged_vid, 0);
4989 if (err) {
4990 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
4991 goto err_untagged_vlan;
4992 }
4993
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004994 return 0;
4995
Scott Feldmanbcfd7802015-06-01 11:39:04 -07004996err_untagged_vlan:
4997 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
4998 ROCKER_OP_FLAG_REMOVE);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004999err_port_ig_tbl:
5000 unregister_netdev(dev);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005001err_register_netdev:
5002 free_netdev(dev);
5003 return err;
5004}
5005
5006static int rocker_probe_ports(struct rocker *rocker)
5007{
5008 int i;
5009 size_t alloc_size;
5010 int err;
5011
5012 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
Scott Feldman27b808c2015-06-01 11:39:02 -07005013 rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
Dan Carpentere65ad3b2015-02-25 16:35:32 +03005014 if (!rocker->ports)
5015 return -ENOMEM;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005016 for (i = 0; i < rocker->port_count; i++) {
5017 err = rocker_probe_port(rocker, i);
5018 if (err)
5019 goto remove_ports;
5020 }
5021 return 0;
5022
5023remove_ports:
5024 rocker_remove_ports(rocker);
5025 return err;
5026}
5027
5028static int rocker_msix_init(struct rocker *rocker)
5029{
5030 struct pci_dev *pdev = rocker->pdev;
5031 int msix_entries;
5032 int i;
5033 int err;
5034
5035 msix_entries = pci_msix_vec_count(pdev);
5036 if (msix_entries < 0)
5037 return msix_entries;
5038
5039 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
5040 return -EINVAL;
5041
5042 rocker->msix_entries = kmalloc_array(msix_entries,
5043 sizeof(struct msix_entry),
5044 GFP_KERNEL);
5045 if (!rocker->msix_entries)
5046 return -ENOMEM;
5047
5048 for (i = 0; i < msix_entries; i++)
5049 rocker->msix_entries[i].entry = i;
5050
5051 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
5052 if (err < 0)
5053 goto err_enable_msix;
5054
5055 return 0;
5056
5057err_enable_msix:
5058 kfree(rocker->msix_entries);
5059 return err;
5060}
5061
Simon Hormane5054642015-05-25 14:28:36 +09005062static void rocker_msix_fini(const struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005063{
5064 pci_disable_msix(rocker->pdev);
5065 kfree(rocker->msix_entries);
5066}
5067
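/* Probe order: enable the PCI device, map BAR0, set up MSI-X, run the
 * basic hardware test, reset the switch, then bring up the DMA rings,
 * command/event IRQs, the OF-DPA tables and finally the ports.  The error
 * ladder below unwinds in exactly the reverse order.
 */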
5068static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5069{
5070 struct rocker *rocker;
5071 int err;
5072
5073 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
5074 if (!rocker)
5075 return -ENOMEM;
5076
5077 err = pci_enable_device(pdev);
5078 if (err) {
5079 dev_err(&pdev->dev, "pci_enable_device failed\n");
5080 goto err_pci_enable_device;
5081 }
5082
5083 err = pci_request_regions(pdev, rocker_driver_name);
5084 if (err) {
5085 dev_err(&pdev->dev, "pci_request_regions failed\n");
5086 goto err_pci_request_regions;
5087 }
5088
5089 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5090 if (!err) {
5091 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5092 if (err) {
5093 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
5094 goto err_pci_set_dma_mask;
5095 }
5096 } else {
5097 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5098 if (err) {
5099 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
5100 goto err_pci_set_dma_mask;
5101 }
5102 }
5103
5104 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
5105 dev_err(&pdev->dev, "invalid PCI region size\n");
Wei Yongjun3122a922015-04-16 20:21:02 +08005106 err = -EINVAL;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005107 goto err_pci_resource_len_check;
5108 }
5109
5110 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
5111 pci_resource_len(pdev, 0));
5112 if (!rocker->hw_addr) {
5113 dev_err(&pdev->dev, "ioremap failed\n");
5114 err = -EIO;
5115 goto err_ioremap;
5116 }
5117 pci_set_master(pdev);
5118
5119 rocker->pdev = pdev;
5120 pci_set_drvdata(pdev, rocker);
5121
5122 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
5123
5124 err = rocker_msix_init(rocker);
5125 if (err) {
5126 dev_err(&pdev->dev, "MSI-X init failed\n");
5127 goto err_msix_init;
5128 }
5129
5130 err = rocker_basic_hw_test(rocker);
5131 if (err) {
5132 dev_err(&pdev->dev, "basic hw test failed\n");
5133 goto err_basic_hw_test;
5134 }
5135
5136 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5137
5138 err = rocker_dma_rings_init(rocker);
5139 if (err)
5140 goto err_dma_rings_init;
5141
5142 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
5143 rocker_cmd_irq_handler, 0,
5144 rocker_driver_name, rocker);
5145 if (err) {
5146 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5147 goto err_request_cmd_irq;
5148 }
5149
5150 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5151 rocker_event_irq_handler, 0,
5152 rocker_driver_name, rocker);
5153 if (err) {
5154 dev_err(&pdev->dev, "cannot assign event irq\n");
5155 goto err_request_event_irq;
5156 }
5157
5158 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5159
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005160 err = rocker_init_tbls(rocker);
5161 if (err) {
5162 dev_err(&pdev->dev, "cannot init rocker tables\n");
5163 goto err_init_tbls;
5164 }
5165
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005166 err = rocker_probe_ports(rocker);
5167 if (err) {
5168 dev_err(&pdev->dev, "failed to probe ports\n");
5169 goto err_probe_ports;
5170 }
5171
5172 dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
5173
5174 return 0;
5175
5176err_probe_ports:
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005177 rocker_free_tbls(rocker);
5178err_init_tbls:
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005179 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5180err_request_event_irq:
5181 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5182err_request_cmd_irq:
5183 rocker_dma_rings_fini(rocker);
5184err_dma_rings_init:
5185err_basic_hw_test:
5186 rocker_msix_fini(rocker);
5187err_msix_init:
5188 iounmap(rocker->hw_addr);
5189err_ioremap:
5190err_pci_resource_len_check:
5191err_pci_set_dma_mask:
5192 pci_release_regions(pdev);
5193err_pci_request_regions:
5194 pci_disable_device(pdev);
5195err_pci_enable_device:
5196 kfree(rocker);
5197 return err;
5198}
5199
5200static void rocker_remove(struct pci_dev *pdev)
5201{
5202 struct rocker *rocker = pci_get_drvdata(pdev);
5203
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005204 rocker_free_tbls(rocker);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005205 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5206 rocker_remove_ports(rocker);
5207 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5208 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5209 rocker_dma_rings_fini(rocker);
5210 rocker_msix_fini(rocker);
5211 iounmap(rocker->hw_addr);
5212 pci_release_regions(rocker->pdev);
5213 pci_disable_device(rocker->pdev);
5214 kfree(rocker);
5215}
5216
5217static struct pci_driver rocker_pci_driver = {
5218 .name = rocker_driver_name,
5219 .id_table = rocker_pci_id_table,
5220 .probe = rocker_probe,
5221 .remove = rocker_remove,
5222};
5223
Scott Feldman6c707942014-11-28 14:34:28 +01005224/************************************
5225 * Net device notifier event handler
5226 ************************************/
5227
Simon Hormane5054642015-05-25 14:28:36 +09005228static bool rocker_port_dev_check(const struct net_device *dev)
Scott Feldman6c707942014-11-28 14:34:28 +01005229{
5230 return dev->netdev_ops == &rocker_port_netdev_ops;
5231}
5232
5233static int rocker_port_bridge_join(struct rocker_port *rocker_port,
5234 struct net_device *bridge)
5235{
Scott Feldman027e00d2015-06-01 11:39:05 -07005236 u16 untagged_vid = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01005237 int err;
5238
Scott Feldman027e00d2015-06-01 11:39:05 -07005239 /* Port is joining bridge, so the internal VLAN for the
5240 * port is going to change to the bridge internal VLAN.
5241 * Let's remove untagged VLAN (vid=0) from port and
5242 * re-add once internal VLAN has changed.
5243 */
5244
5245 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5246 if (err)
5247 return err;
5248
Simon Hormandf6a2062015-05-21 12:40:17 +09005249 rocker_port_internal_vlan_id_put(rocker_port,
Scott Feldman6c707942014-11-28 14:34:28 +01005250 rocker_port->dev->ifindex);
Scott Feldman027e00d2015-06-01 11:39:05 -07005251 rocker_port->internal_vlan_id =
5252 rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
Scott Feldman6c707942014-11-28 14:34:28 +01005253
5254 rocker_port->bridge_dev = bridge;
5255
Scott Feldman027e00d2015-06-01 11:39:05 -07005256 return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
5257 untagged_vid, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01005258}
5259
5260static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
5261{
Scott Feldman027e00d2015-06-01 11:39:05 -07005262 u16 untagged_vid = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01005263 int err;
5264
Scott Feldman027e00d2015-06-01 11:39:05 -07005265 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5266 if (err)
5267 return err;
5268
Simon Hormandf6a2062015-05-21 12:40:17 +09005269 rocker_port_internal_vlan_id_put(rocker_port,
Scott Feldman6c707942014-11-28 14:34:28 +01005270 rocker_port->bridge_dev->ifindex);
Scott Feldman6c707942014-11-28 14:34:28 +01005271 rocker_port->internal_vlan_id =
5272 rocker_port_internal_vlan_id_get(rocker_port,
5273 rocker_port->dev->ifindex);
Scott Feldman027e00d2015-06-01 11:39:05 -07005274
5275 rocker_port->bridge_dev = NULL;
5276
5277 err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
5278 untagged_vid, 0);
Scott Feldmane47172a2015-02-25 20:15:38 -08005279 if (err)
5280 return err;
5281
5282 if (rocker_port->dev->flags & IFF_UP)
Scott Feldman179f9a22015-06-12 21:35:46 -07005283 err = rocker_port_fwd_enable(rocker_port,
5284 SWITCHDEV_TRANS_NONE, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01005285
5286 return err;
5287}
5288
5290static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5291 struct net_device *master)
5292{
5293 int err;
5294
5295 rocker_port->bridge_dev = master;
5296
5297 err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
5298 if (err)
5299 return err;
5300 err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
5301
5302 return err;
5303}
5304
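/* NETDEV_CHANGEUPPER dispatch: gaining a bridge or Open vSwitch master
 * reprograms the port accordingly; losing the master reverses it.
 */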
Scott Feldman6c707942014-11-28 14:34:28 +01005305static int rocker_port_master_changed(struct net_device *dev)
5306{
5307 struct rocker_port *rocker_port = netdev_priv(dev);
5308 struct net_device *master = netdev_master_upper_dev_get(dev);
5309 int err = 0;
5310
Simon Horman82549732015-07-16 10:39:14 +09005311 /* N.B.: do nothing if the master's type is not supported */
5312 if (master && master->rtnl_link_ops) {
5313 if (!strcmp(master->rtnl_link_ops->kind, "bridge"))
5314 err = rocker_port_bridge_join(rocker_port, master);
5315 else if (!strcmp(master->rtnl_link_ops->kind, "openvswitch"))
5316 err = rocker_port_ovs_changed(rocker_port, master);
5317 } else if (rocker_port_is_bridged(rocker_port)) {
Scott Feldman6c707942014-11-28 14:34:28 +01005318 err = rocker_port_bridge_leave(rocker_port);
Simon Horman82549732015-07-16 10:39:14 +09005319 } else if (rocker_port_is_ovsed(rocker_port)) {
5320 err = rocker_port_ovs_changed(rocker_port, NULL);
5321 }
Scott Feldman6c707942014-11-28 14:34:28 +01005322
5323 return err;
5324}
5325
5326static int rocker_netdevice_event(struct notifier_block *unused,
5327 unsigned long event, void *ptr)
5328{
5329 struct net_device *dev;
5330 int err;
5331
5332 switch (event) {
5333 case NETDEV_CHANGEUPPER:
5334 dev = netdev_notifier_info_to_dev(ptr);
5335 if (!rocker_port_dev_check(dev))
5336 return NOTIFY_DONE;
5337 err = rocker_port_master_changed(dev);
5338 if (err)
5339 netdev_warn(dev,
5340 "failed to reflect master change (err %d)\n",
5341 err);
5342 break;
5343 }
5344
5345 return NOTIFY_DONE;
5346}
5347
5348static struct notifier_block rocker_netdevice_nb __read_mostly = {
5349 .notifier_call = rocker_netdevice_event,
5350};
5351
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005352/************************************
5353 * Net event notifier event handler
5354 ************************************/
5355
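/* ARP neighbour updates program (or, when the entry is no longer valid,
 * remove) the corresponding host entry in hardware.  ROCKER_OP_FLAG_NOWAIT
 * is used since netevent notifiers may run in atomic context.
 */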
5356static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5357{
5358 struct rocker_port *rocker_port = netdev_priv(dev);
Scott Feldman02a9fbf2015-06-12 21:35:47 -07005359 int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5360 ROCKER_OP_FLAG_NOWAIT;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005361 __be32 ip_addr = *(__be32 *)n->primary_key;
5362
Scott Feldmanc4f20322015-05-10 09:47:50 -07005363 return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
5364 flags, ip_addr, n->ha);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005365}
5366
5367static int rocker_netevent_event(struct notifier_block *unused,
5368 unsigned long event, void *ptr)
5369{
5370 struct net_device *dev;
5371 struct neighbour *n = ptr;
5372 int err;
5373
5374 switch (event) {
5375 case NETEVENT_NEIGH_UPDATE:
5376 if (n->tbl != &arp_tbl)
5377 return NOTIFY_DONE;
5378 dev = n->dev;
5379 if (!rocker_port_dev_check(dev))
5380 return NOTIFY_DONE;
5381 err = rocker_neigh_update(dev, n);
5382 if (err)
5383 netdev_warn(dev,
5384 "failed to handle neigh update (err %d)\n",
5385 err);
5386 break;
5387 }
5388
5389 return NOTIFY_DONE;
5390}
5391
5392static struct notifier_block rocker_netevent_nb __read_mostly = {
5393 .notifier_call = rocker_netevent_event,
5394};
5395
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005396/***********************
5397 * Module init and exit
5398 ***********************/
5399
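/* The netdevice and netevent notifiers are registered before the PCI
 * driver so no events are missed while ports are being probed; the error
 * path and module exit undo the registrations.
 */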
5400static int __init rocker_module_init(void)
5401{
Scott Feldman6c707942014-11-28 14:34:28 +01005402 int err;
5403
5404 register_netdevice_notifier(&rocker_netdevice_nb);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005405 register_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005406 err = pci_register_driver(&rocker_pci_driver);
5407 if (err)
5408 goto err_pci_register_driver;
5409 return 0;
5410
5411err_pci_register_driver:
Gilad Ben-Yossefa076e6b2015-06-23 10:52:10 +03005412 unregister_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005413 unregister_netdevice_notifier(&rocker_netdevice_nb);
5414 return err;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005415}
5416
5417static void __exit rocker_module_exit(void)
5418{
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005419 unregister_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005420 unregister_netdevice_notifier(&rocker_netdevice_nb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005421 pci_unregister_driver(&rocker_pci_driver);
5422}
5423
5424module_init(rocker_module_init);
5425module_exit(rocker_module_exit);
5426
5427MODULE_LICENSE("GPL v2");
5428MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5429MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5430MODULE_DESCRIPTION("Rocker switch device driver");
5431MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);