/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};

struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};

struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};

struct rocker_desc_info {
	char *data; /* mapped */
	size_t data_size;
	size_t tlv_size;
	struct rocker_desc *desc;
	dma_addr_t mapaddr;
};

struct rocker_dma_ring_info {
	size_t size;
	u32 head;
	u32 tail;
	struct rocker_desc *desc; /* mapped */
	dma_addr_t mapaddr;
	struct rocker_desc_info *desc_info;
	unsigned int type;
};

struct rocker;

enum {
	ROCKER_CTRL_LINK_LOCAL_MCAST,
	ROCKER_CTRL_LOCAL_ARP,
	ROCKER_CTRL_IPV4_MCAST,
	ROCKER_CTRL_IPV6_MCAST,
	ROCKER_CTRL_DFLT_BRIDGING,
	ROCKER_CTRL_DFLT_OVS,
	ROCKER_CTRL_MAX,
};

#define ROCKER_INTERNAL_VLAN_ID_BASE	0x0f00
#define ROCKER_N_INTERNAL_VLANS		255
#define ROCKER_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)

struct rocker_port {
	struct net_device *dev;
	struct net_device *bridge_dev;
	struct rocker *rocker;
	unsigned int port_number;
	u32 pport;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[ROCKER_CTRL_MAX];
	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
	struct napi_struct napi_tx;
	struct napi_struct napi_rx;
	struct rocker_dma_ring_info tx_ring;
	struct rocker_dma_ring_info rx_ring;
	struct list_head trans_mem;
};

struct rocker {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct msix_entry *msix_entries;
	unsigned int port_count;
	struct rocker_port **ports;
	struct {
		u64 id;
	} hw;
	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
	struct rocker_dma_ring_info cmd_ring;
	struct rocker_dma_ring_info event_ring;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

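/* Sanity note (illustrative): ROCKER_INTERNAL_VLAN_ID_BASE (0x0f00) plus
 * ROCKER_N_INTERNAL_VLANS (255) minus one gives 0x0ffe, the "end" bound
 * used above, so each bit of internal_vlan_bitmap maps to exactly one
 * internal VLAN ID.
 */
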
static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

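/* Example behavior (illustrative): untagged traffic arrives with vid 0,
 * which rocker_port_vid_to_vlan() maps to the port's internal VLAN ID with
 * *pop_vlan set; rocker_port_vlan_to_vid() maps any internal VLAN ID back
 * to vid 0, so internal IDs never leak out as real VLANs.
 */
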
static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_ovs_master(rocker_port->bridge_dev);
}

#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)

static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
				     enum switchdev_trans trans, int flags,
				     size_t size)
{
	struct list_head *elem = NULL;
	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a per-port list.  If in transaction
	 * commit phase, dequeue the memory from the per-port list
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	switch (trans) {
	case SWITCHDEV_TRANS_PREPARE:
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		list_add_tail(elem, &rocker_port->trans_mem);
		break;
	case SWITCHDEV_TRANS_COMMIT:
		BUG_ON(list_empty(&rocker_port->trans_mem));
		elem = rocker_port->trans_mem.next;
		list_del_init(elem);
		break;
	case SWITCHDEV_TRANS_NONE:
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (elem)
			INIT_LIST_HEAD(elem);
		break;
	default:
		break;
	}

	return elem ? elem + 1 : NULL;
}

static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
}

static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 size_t n, size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
}

static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
{
	struct list_head *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (trans == SWITCHDEV_TRANS_PREPARE)
		return;

	elem = (struct list_head *)mem - 1;
	BUG_ON(!list_empty(elem));
	kfree(elem);
}

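/* Minimal usage sketch (illustrative, not part of the driver): a switchdev
 * operation runs the same code for the PREPARE and COMMIT phases, so an
 * allocation made while preparing is dequeued again, in order, while
 * committing:
 *
 *	entry = rocker_port_kzalloc(rocker_port, trans, flags,
 *				    sizeof(*entry));
 *	if (!entry)
 *		return -ENOMEM;
 *	...
 *	rocker_port_kfree(trans, entry);	// no-op while in PREPARE
 */
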
struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
					      enum switchdev_trans trans,
					      int flags)
{
	struct rocker_wait *wait;

	wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
	if (!wait)
		return NULL;
	rocker_wait_init(wait);
	return wait;
}

static void rocker_wait_destroy(enum switchdev_trans trans,
				struct rocker_wait *wait)
{
	rocker_port_kfree(trans, wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	return wait->done;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))

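/* The ## token pasting keeps register names short at call sites; an
 * illustrative expansion:
 *
 *	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
 *
 * becomes
 *
 *	writel(ROCKER_MSIX_VEC_TEST, rocker->hw_addr + ROCKER_TEST_IRQ);
 */
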
/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}

static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
{
	const struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, expected %02x\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}

/******
 * TLV
 ******/

#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))

/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 * |             Header          | Pad |           Payload           | Pad |
 * |      (struct rocker_tlv)    | ing |                             | ing |
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 *  <--------------------------- tlv->len -------------------------->
 */

static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
					  int *remaining)
{
	int totlen = ROCKER_TLV_ALIGN(tlv->len);

	*remaining -= totlen;
	return (struct rocker_tlv *) ((char *) tlv + totlen);
}

static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
{
	return remaining >= (int) ROCKER_TLV_HDRLEN &&
	       tlv->len >= ROCKER_TLV_HDRLEN &&
	       tlv->len <= remaining;
}

#define rocker_tlv_for_each(pos, head, len, rem)	\
	for (pos = head, rem = len;			\
	     rocker_tlv_ok(pos, rem);			\
	     pos = rocker_tlv_next(pos, &(rem)))

#define rocker_tlv_for_each_nested(pos, tlv, rem)	\
	rocker_tlv_for_each(pos, rocker_tlv_data(tlv),	\
			    rocker_tlv_len(tlv), rem)

static int rocker_tlv_attr_size(int payload)
{
	return ROCKER_TLV_HDRLEN + payload;
}

static int rocker_tlv_total_size(int payload)
{
	return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}

static int rocker_tlv_padlen(int payload)
{
	return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}

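/* Worked example (illustrative, assuming ROCKER_TLV_HDRLEN evaluates to 8,
 * i.e. sizeof(struct rocker_tlv) rounded up to ROCKER_TLV_ALIGNTO): a
 * 6-byte MAC payload gives attr_size = 8 + 6 = 14, total_size =
 * ROCKER_TLV_ALIGN(14) = 16, so padlen = 16 - 14 = 2 trailing pad bytes.
 */
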
static int rocker_tlv_type(const struct rocker_tlv *tlv)
{
	return tlv->type;
}

static void *rocker_tlv_data(const struct rocker_tlv *tlv)
{
	return (char *) tlv + ROCKER_TLV_HDRLEN;
}

static int rocker_tlv_len(const struct rocker_tlv *tlv)
{
	return tlv->len - ROCKER_TLV_HDRLEN;
}

static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
{
	return *(u8 *) rocker_tlv_data(tlv);
}

static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
{
	return *(u16 *) rocker_tlv_data(tlv);
}

static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
{
	return *(__be16 *) rocker_tlv_data(tlv);
}

static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
{
	return *(u32 *) rocker_tlv_data(tlv);
}

static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
{
	return *(u64 *) rocker_tlv_data(tlv);
}

static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
			     const char *buf, int buf_len)
{
	const struct rocker_tlv *tlv;
	const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
	int rem;

	memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));

	rocker_tlv_for_each(tlv, head, buf_len, rem) {
		u32 type = rocker_tlv_type(tlv);

		if (type > 0 && type <= maxtype)
			tb[type] = tlv;
	}
}

static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
				    const struct rocker_tlv *tlv)
{
	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
			 rocker_tlv_len(tlv));
}

static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
				  const struct rocker_desc_info *desc_info)
{
	rocker_tlv_parse(tb, maxtype, desc_info->data,
			 desc_info->desc->tlv_size);
}

static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
{
	return (struct rocker_tlv *) ((char *) desc_info->data +
				      desc_info->tlv_size);
}

static int rocker_tlv_put(struct rocker_desc_info *desc_info,
			  int attrtype, int attrlen, const void *data)
{
	int tail_room = desc_info->data_size - desc_info->tlv_size;
	int total_size = rocker_tlv_total_size(attrlen);
	struct rocker_tlv *tlv;

	if (unlikely(tail_room < total_size))
		return -EMSGSIZE;

	tlv = rocker_tlv_start(desc_info);
	desc_info->tlv_size += total_size;
	tlv->type = attrtype;
	tlv->len = rocker_tlv_attr_size(attrlen);
	memcpy(rocker_tlv_data(tlv), data, attrlen);
	memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
	return 0;
}

static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
			     int attrtype, u8 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
}

static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
			      int attrtype, u16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
}

static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
			       int attrtype, __be16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
}

static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
			      int attrtype, u32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
}

static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
			       int attrtype, __be32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
}

static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
			      int attrtype, u64 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
}

static struct rocker_tlv *
rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
{
	struct rocker_tlv *start = rocker_tlv_start(desc_info);

	if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
		return NULL;

	return start;
}

static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
				struct rocker_tlv *start)
{
	start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
}

static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
				   const struct rocker_tlv *start)
{
	desc_info->tlv_size = (const char *) start - desc_info->data;
}

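/* Typical build pattern for a command descriptor (illustrative sketch;
 * ROCKER_TLV_CMD_TYPE and ROCKER_TLV_CMD_INFO are attribute names assumed
 * from rocker.h):
 *
 *	struct rocker_tlv *cmd_info;
 *
 *	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, cmd_type))
 *		return -EMSGSIZE;
 *	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 *	if (!cmd_info)
 *		return -EMSGSIZE;
 *	// ...put nested attributes...
 *	rocker_tlv_nest_end(desc_info, cmd_info);
 */
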
/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}

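/* e.g. __pos_inc(6, 8) == 7 but __pos_inc(7, 8) == 0: ring positions wrap
 * back to the start once they reach the ring size.
 */
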
static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}

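/* e.g. rocker_dma_ring_size_fix(100) rounds up to the next power of two,
 * 128, then clamps into [ROCKER_DMA_SIZE_MIN, ROCKER_DMA_SIZE_MAX]
 * (illustrative, assuming 128 lies within that range).
 */
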
static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}

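/* Only size - 1 descriptors are handed over above: advancing head onto
 * tail would trip the BUG_ON in rocker_desc_head_set(), because
 * head == tail is reserved to distinguish an empty ring from a full one.
 * The last descriptor is therefore committed but held back.
 */
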
static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

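/* Worked example (illustrative): with the default MTU of 1500, each rx
 * buffer is 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) =
 * 1522 bytes, enough for one full VLAN-tagged frame including the FCS.
 */
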
static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}

static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
					 const struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}

static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}

static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(const struct rocker_port *rocker_port,
				   bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}

/********************************
 * Interrupt handler and helpers
 ********************************/

static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
			rocker_wait_destroy(SWITCHDEV_TRANS_NONE, wait);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}

static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}

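/* The device's pport numbering is 1-based while the ports[] array is
 * 0-based, hence the "- 1" above when translating an event's pport into
 * a port_number index.
 */
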
static int rocker_port_fdb(struct rocker_port *rocker_port,
			   enum switchdev_trans trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);

static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, SWITCHDEV_TRANS_NONE,
			       addr, vlan_id, flags);
}

static int rocker_event_process(const struct rocker *rocker,
				const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	const struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}

1559static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1560{
1561 struct rocker_port *rocker_port = dev_id;
1562
1563 napi_schedule(&rocker_port->napi_tx);
1564 return IRQ_HANDLED;
1565}
1566
1567static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1568{
1569 struct rocker_port *rocker_port = dev_id;
1570
1571 napi_schedule(&rocker_port->napi_rx);
1572 return IRQ_HANDLED;
1573}
1574
1575/********************
1576 * Command interface
1577 ********************/
1578
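/* Commands are built and parsed through a pair of callbacks: a
 * "prepare" callback fills a command descriptor with TLVs before it
 * is posted to the command ring, and an optional "process" callback
 * parses the TLVs the hardware wrote back into the same descriptor
 * once it completes. Both receive an opaque priv pointer supplied
 * by the caller of rocker_cmd_exec().
 */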
typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv);

typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
				    const struct rocker_desc_info *desc_info,
				    void *priv);

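/* rocker_cmd_exec() is the single submission path for the command
 * ring: allocate a wait object, reserve a descriptor under
 * cmd_ring_lock, let the prepare callback fill it in, then post it
 * to the hardware. With ROCKER_OP_FLAG_NOWAIT the caller returns
 * immediately and the wait object is reclaimed by the completion
 * side (not shown in this section); otherwise the caller sleeps up
 * to HZ / 10 for completion and then runs the process callback, if
 * any, on the returned descriptor. Under SWITCHDEV_TRANS_PREPARE
 * nothing is posted; the call only proves the descriptor and its
 * TLVs can be built.
 */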
static int rocker_cmd_exec(struct rocker_port *rocker_port,
			   enum switchdev_trans trans, int flags,
			   rocker_cmd_prep_cb_t prepare, void *prepare_priv,
			   rocker_cmd_proc_cb_t process, void *process_priv)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
	unsigned long lock_flags;
	int err;

	wait = rocker_wait_create(rocker_port, trans, flags);
	if (!wait)
		return -ENOMEM;
	wait->nowait = nowait;

	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		err = -EAGAIN;
		goto out;
	}

	err = prepare(rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		goto out;
	}

	rocker_desc_cookie_ptr_set(desc_info, wait);

	if (trans != SWITCHDEV_TRANS_PREPARE)
		rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	if (nowait)
		return 0;

	if (trans != SWITCHDEV_TRANS_PREPARE)
		if (!rocker_wait_event_timeout(wait, HZ / 10))
			return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		return err;

	if (process)
		err = process(rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
out:
	rocker_wait_destroy(trans, wait);
	return err;
}

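/* Below are the prepare/process callback pairs for the port-settings
 * commands. Each prepare callback writes the same outer shape: a
 * CMD_TYPE TLV, then a nested CMD_INFO TLV holding the PPORT plus
 * whatever attribute is being read or written.
 */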
static int
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_TP;
	ecmd->phy_address = 0xff;
	ecmd->port = PORT_TP;
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}

static int
rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	if (!attr)
		return -EIO;

	if (rocker_tlv_len(attr) != ETH_ALEN)
		return -EINVAL;

	ether_addr_copy(macaddr, rocker_tlv_data(attr));
	return 0;
}

struct port_name {
	char *buf;
	size_t len;
};

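/* The physical-name attribute comes back as a raw byte string from
 * the device, so it is copied defensively: the copy is clamped to
 * the caller's buffer (leaving room for the terminating NUL),
 * filtered down to alphanumeric characters, and rejected outright
 * with -EIO if nothing printable survives.
 */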
static int
rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
					    const struct rocker_desc_info *desc_info,
					    void *priv)
{
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct port_name *name = priv;
	const struct rocker_tlv *attr;
	size_t i, j, len;
	const char *str;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
	if (!attr)
		return -EIO;

	/* leave room for the terminating NUL so the write to
	 * name->buf[j] below cannot land one past the end of the
	 * caller's buffer
	 */
	len = min_t(size_t, rocker_tlv_len(attr), name->len - 1);
	str = rocker_tlv_data(attr);

	/* make sure name only contains alphanumeric characters */
	for (i = j = 0; i < len; ++i) {
		if (isalnum(str[i])) {
			name->buf[j] = str[i];
			j++;
		}
	}

	if (j == 0)
		return -EIO;

	name->buf[j] = '\0';

	return 0;
}

static int
rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
			       ethtool_cmd_speed(ecmd)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
			      ecmd->duplex))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
			      ecmd->autoneg))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	const unsigned char *macaddr = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
			   ETH_ALEN, macaddr))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      void *priv)
{
	int mtu = *(int *)priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
			       mtu))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
			      !!(rocker_port->brport_flags & BR_LEARNING)))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

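/* The thin wrappers below bind a prepare/process pair to
 * rocker_cmd_exec(). Reading a port's MAC address, for example,
 * reduces to one synchronous call (illustrative sketch; err and the
 * buffer are the caller's):
 *
 *	unsigned char macaddr[ETH_ALEN];
 *
 *	err = rocker_cmd_get_port_settings_macaddr(rocker_port, macaddr);
 *
 * SWITCHDEV_TRANS_NONE is passed because these settings calls do not
 * participate in a switchdev prepare/commit transaction.
 */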
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_ethtool_proc,
			       ecmd);
}

static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_macaddr_proc,
			       macaddr);
}

static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
			       rocker_cmd_set_port_settings_ethtool_prep,
			       ecmd, NULL, NULL);
}

static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
			       rocker_cmd_set_port_settings_macaddr_prep,
			       macaddr, NULL, NULL);
}

static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
					    int mtu)
{
	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
			       rocker_cmd_set_port_settings_mtu_prep,
			       &mtu, NULL, NULL);
}

static int rocker_port_set_learning(struct rocker_port *rocker_port,
				    enum switchdev_trans trans)
{
	return rocker_cmd_exec(rocker_port, trans, 0,
			       rocker_cmd_set_port_learning_prep,
			       NULL, NULL, NULL);
}

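/* OF-DPA flow-table programming: each helper below serializes the
 * key of one flow-table type (ingress port, VLAN, termination MAC,
 * unicast routing, bridging, ACL) into the TLVs the device expects.
 * Match fields that carry a mask are emitted as a value/mask TLV
 * pair, e.g. IN_PPORT plus IN_PPORT_MASK.
 */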
static int
rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
				const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			     const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				 const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				      const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			       const struct rocker_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			    const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}

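/* The shared flow add/mod prepare callback: emit the command type
 * (FLOW_ADD or FLOW_MOD, chosen by the table code further below),
 * then the table ID, priority, a hard-timeout of 0 and the cookie
 * naming the entry, and finally dispatch on key.tbl_id to the
 * per-table encoder above. Note how the ACL encoder splits ip_tos
 * into separate TLVs: the low six bits go out as DSCP, the top two
 * as ECN.
 */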
static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

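/* Group-table entries are addressed by a 32-bit group_id that packs
 * the group type together with type-specific fields (an L2 interface
 * group, for example, encodes VLAN and physical port, recovered
 * below with ROCKER_GROUP_PORT_GET()). The encoders that follow
 * serialize the per-type action data: pop-VLAN for L2 interface
 * groups, rewrite fields for L2 rewrite groups, a member-ID vector
 * for flood and multicast groups, and source/destination rewrite
 * plus TTL check for L3 unicast groups.
 */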
static int
rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
				      struct rocker_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
			       ROCKER_GROUP_PORT_GET(entry->group_id)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
			      entry->l2_interface.pop_vlan))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				   const struct rocker_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}

static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}

static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	const struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/

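/* The driver shadows hardware state in five hash tables (flow,
 * group, FDB, internal VLAN, neigh), each guarded by its own
 * spinlock taken with irqsave, since entries can also be reached
 * from the IRQ-context event path. A minimal lookup follows the
 * usual hashtable.h pattern (sketch):
 *
 *	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
 *	found = rocker_flow_tbl_find(rocker, match);
 *	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
 */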
static int rocker_init_tbls(struct rocker *rocker)
{
	hash_init(rocker->flow_tbl);
	spin_lock_init(&rocker->flow_tbl_lock);

	hash_init(rocker->group_tbl);
	spin_lock_init(&rocker->group_tbl_lock);

	hash_init(rocker->fdb_tbl);
	spin_lock_init(&rocker->fdb_tbl_lock);

	hash_init(rocker->internal_vlan_tbl);
	spin_lock_init(&rocker->internal_vlan_tbl_lock);

	hash_init(rocker->neigh_tbl);
	spin_lock_init(&rocker->neigh_tbl_lock);

	return 0;
}

static void rocker_free_tbls(struct rocker *rocker)
{
	unsigned long flags;
	struct rocker_flow_tbl_entry *flow_entry;
	struct rocker_group_tbl_entry *group_entry;
	struct rocker_fdb_tbl_entry *fdb_entry;
	struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
	struct rocker_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
	hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
	hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);

	spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
	hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
}

static struct rocker_flow_tbl_entry *
rocker_flow_tbl_find(const struct rocker *rocker,
		     const struct rocker_flow_tbl_entry *match)
{
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(rocker->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}

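/* Flow entries hash on a CRC-32 of their match key; key_len lets an
 * entry hash over a prefix of the key only (unicast routing uses
 * this to keep its group_id out of the match). Add-or-modify works
 * by looking up the new entry's key first: a hit steals the existing
 * cookie and becomes a FLOW_MOD, a miss takes a fresh cookie from
 * flow_tbl_next_cookie and becomes a FLOW_ADD. Under
 * SWITCHDEV_TRANS_PREPARE the hash table itself is left untouched.
 */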
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
			       enum switchdev_trans trans, int flags,
			       struct rocker_flow_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		match->cookie = found->cookie;
		if (trans != SWITCHDEV_TRANS_PREPARE)
			hash_del(&found->entry);
		rocker_port_kfree(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = rocker->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	if (trans != SWITCHDEV_TRANS_PREPARE)
		hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

	return rocker_cmd_exec(rocker_port, trans, flags,
			       rocker_cmd_flow_tbl_add, found, NULL, NULL);
}

static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
			       enum switchdev_trans trans, int flags,
			       struct rocker_flow_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		if (trans != SWITCHDEV_TRANS_PREPARE)
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

	rocker_port_kfree(trans, match);

	if (found) {
		err = rocker_cmd_exec(rocker_port, trans, flags,
				      rocker_cmd_flow_tbl_del,
				      found, NULL, NULL);
		rocker_port_kfree(trans, found);
	}

	return err;
}

static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
			      enum switchdev_trans trans, int flags,
			      struct rocker_flow_tbl_entry *entry)
{
	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
	else
		return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
				   enum switchdev_trans trans, int flags,
				   u32 in_pport, u32 in_pport_mask,
				   enum rocker_of_dpa_table_id goto_tbl)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = ROCKER_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
				enum switchdev_trans trans, int flags,
				u32 in_pport, __be16 vlan_id,
				__be16 vlan_id_mask,
				enum rocker_of_dpa_table_id goto_tbl,
				bool untagged, __be16 new_vlan_id)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = ROCKER_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;

	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
				    enum switchdev_trans trans,
				    u32 in_pport, u32 in_pport_mask,
				    __be16 eth_type, const u8 *eth_dst,
				    const u8 *eth_dst_mask, __be16 vlan_id,
				    __be16 vlan_id_mask, bool copy_to_cpu,
				    int flags)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

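/* Bridging flows derive their priority from three properties of the
 * match: whether a VLAN ID is present (VLAN bridging vs. tenant or
 * tunnel bridging), whether the entry is a default (no eth_dst, or
 * an eth_dst qualified by a mask), and whether that mask wildcards
 * any bits (anything other than ff:ff:ff:ff:ff:ff). The intent is
 * that exact, non-default FDB matches take precedence over the
 * wildcarded flood defaults.
 */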
static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
				  enum switchdev_trans trans, int flags,
				  const u8 *eth_dst, const u8 *eth_dst_mask,
				  __be16 vlan_id, u32 tunnel_id,
				  enum rocker_of_dpa_table_id goto_tbl,
				  u32 group_id, bool copy_to_cpu)
{
	struct rocker_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = ROCKER_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
					  enum switchdev_trans trans,
					  __be16 eth_type, __be32 dst,
					  __be32 dst_mask, u32 priority,
					  enum rocker_of_dpa_table_id goto_tbl,
					  u32 group_id, int flags)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct rocker_flow_tbl_key,
				  ucast_routing.group_id);

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
			       enum switchdev_trans trans, int flags,
			       u32 in_pport, u32 in_pport_mask,
			       const u8 *eth_src, const u8 *eth_src_mask,
			       const u8 *eth_dst, const u8 *eth_dst_mask,
			       __be16 eth_type, __be16 vlan_id,
			       __be16 vlan_id_mask, u8 ip_proto,
			       u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			       u32 group_id)
{
	u32 priority;
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	priority = ROCKER_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = ROCKER_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = ROCKER_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static struct rocker_group_tbl_entry *
rocker_group_tbl_find(const struct rocker *rocker,
		      const struct rocker_group_tbl_entry *match)
{
	struct rocker_group_tbl_entry *found;

	hash_for_each_possible(rocker->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}

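/* Group-table shadow entries mirror the flow-table pattern above,
 * keyed on group_id instead of a key CRC. Flood and multicast
 * entries own a separately allocated group_ids vector, so freeing
 * has to go through rocker_group_tbl_entry_free() rather than a bare
 * rocker_port_kfree() to avoid leaking that vector.
 */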
Simon Horman0985df72015-05-25 14:28:35 +09002750static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002751 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002752{
2753 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2754 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2755 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
Simon Horman0985df72015-05-25 14:28:35 +09002756 rocker_port_kfree(trans, entry->group_ids);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002757 break;
2758 default:
2759 break;
2760 }
Simon Horman0985df72015-05-25 14:28:35 +09002761 rocker_port_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002762}
2763
2764static int rocker_group_tbl_add(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07002765 enum switchdev_trans trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002766 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002767{
2768 struct rocker *rocker = rocker_port->rocker;
2769 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002770 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002771
Scott Feldman179f9a22015-06-12 21:35:46 -07002772 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002773
2774 found = rocker_group_tbl_find(rocker, match);
2775
2776 if (found) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07002777 if (trans != SWITCHDEV_TRANS_PREPARE)
2778 hash_del(&found->entry);
Simon Horman0985df72015-05-25 14:28:35 +09002779 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002780 found = match;
2781 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2782 } else {
2783 found = match;
2784 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2785 }
2786
Scott Feldmanc4f20322015-05-10 09:47:50 -07002787 if (trans != SWITCHDEV_TRANS_PREPARE)
2788 hash_add(rocker->group_tbl, &found->entry, found->group_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002789
Scott Feldman179f9a22015-06-12 21:35:46 -07002790 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002791
Scott Feldman179f9a22015-06-12 21:35:46 -07002792 return rocker_cmd_exec(rocker_port, trans, flags,
2793 rocker_cmd_group_tbl_add, found, NULL, NULL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002794}

static int rocker_group_tbl_del(struct rocker_port *rocker_port,
				enum switchdev_trans trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		if (trans != SWITCHDEV_TRANS_PREPARE)
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	rocker_group_tbl_entry_free(trans, match);

	if (found) {
		err = rocker_cmd_exec(rocker_port, trans, flags,
				      rocker_cmd_group_tbl_del,
				      found, NULL, NULL);
		rocker_group_tbl_entry_free(trans, found);
	}

	return err;
}

static int rocker_group_tbl_do(struct rocker_port *rocker_port,
			       enum switchdev_trans trans, int flags,
			       struct rocker_group_tbl_entry *entry)
{
	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_group_tbl_del(rocker_port, trans, flags, entry);
	else
		return rocker_group_tbl_add(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_interface(struct rocker_port *rocker_port,
				     enum switchdev_trans trans, int flags,
				     __be16 vlan_id, u32 out_pport,
				     int pop_vlan)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
				   enum switchdev_trans trans,
				   int flags, u8 group_count,
				   const u32 *group_ids, u32 group_id)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = group_id;
	entry->group_count = group_count;

	entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
					       group_count, sizeof(u32));
	if (!entry->group_ids) {
		rocker_port_kfree(trans, entry);
		return -ENOMEM;
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_flood(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 __be16 vlan_id, u8 group_count,
				 const u32 *group_ids, u32 group_id)
{
	return rocker_group_l2_fan_out(rocker_port, trans, flags,
				       group_count, group_ids,
				       group_id);
}

static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
				   enum switchdev_trans trans, int flags,
				   u32 index, const u8 *src_mac, const u8 *dst_mac,
				   __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}
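
/* Group chaining follows the OF-DPA model: an L3 unicast group rewrites
 * src/dst MAC and VLAN (optionally checking TTL) and then points at the
 * L2 interface group for the egress pport, which makes the final
 * tagged-vs-untagged decision via pop_vlan.
 */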

static struct rocker_neigh_tbl_entry *
rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
{
	struct rocker_neigh_tbl_entry *found;

	hash_for_each_possible(rocker->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}

static void _rocker_neigh_add(struct rocker *rocker,
			      enum switchdev_trans trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (trans != SWITCHDEV_TRANS_COMMIT)
		entry->index = rocker->neigh_tbl_next_index++;
	if (trans == SWITCHDEV_TRANS_PREPARE)
		return;
	entry->ref_count++;
	hash_add(rocker->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}

static void _rocker_neigh_del(enum switchdev_trans trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (trans == SWITCHDEV_TRANS_PREPARE)
		return;
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		rocker_port_kfree(trans, entry);
	}
}

static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
				 enum switchdev_trans trans,
				 const u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else if (trans != SWITCHDEV_TRANS_PREPARE) {
		entry->ref_count++;
	}
}
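
/* The _rocker_neigh_* helpers are called with rocker->neigh_tbl_lock held.
 * During PREPARE they only reserve an index; ref counting and hash
 * membership are deferred to COMMIT so a failed prepare leaves the table
 * unchanged.
 */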

static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
				  enum switchdev_trans trans,
				  int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		_rocker_neigh_add(rocker, trans, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = rocker_group_l3_unicast(rocker_port, trans, flags,
				      entry->index,
				      rocker_port->dev->dev_addr,
				      entry->eth_dst,
				      rocker_port->internal_vlan_id,
				      entry->ttl_check,
				      rocker_port->pport);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
						     eth_type, ip_addr,
						     inet_make_mask(32),
						     priority, goto_tbl,
						     group_id, flags);

		if (err)
			netdev_err(rocker_port->dev,
				   "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	if (!adding)
		rocker_port_kfree(trans, entry);

	return err;
}
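
/* ipv4_neigh takes a scratch copy of the entry under neigh_tbl_lock so the
 * group and flow programming can run without the lock held; the scratch
 * copy is freed on every path except a successful add, where it became the
 * hash-table entry itself.
 */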

static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
				    enum switchdev_trans trans, __be32 ip_addr)
{
	struct net_device *dev = rocker_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
					     ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}
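
/* Resolution is asynchronous: when the neighbour is not yet NUD_VALID,
 * neigh_event_send() above only kicks off ARP; the neigh entry is
 * installed later, once the neighbour subsystem reports the resolved
 * address (the driver listens for those updates elsewhere in this file).
 */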

static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
			       enum switchdev_trans trans, int flags,
			       __be32 ip_addr, u32 *index)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);
	if (found)
		*index = found->index;

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		_rocker_neigh_add(rocker, trans, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (!adding)
		rocker_port_kfree(trans, entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);

	return err;
}
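
/* Nexthop entries are shared between routes: the first reference
 * allocates the L3 unicast group index and starts ARP resolution; later
 * references bump the ref count and reuse the index returned in *index.
 */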

static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
					enum switchdev_trans trans,
					int flags, __be16 vlan_id)
{
	struct rocker_port *p;
	const struct rocker *rocker = rocker_port->rocker;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
					rocker->port_count, sizeof(u32));
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (!p)
			continue;
		if (!rocker_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
				    group_count, group_ids, group_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	rocker_port_kfree(trans, group_ids);
	return err;
}
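
/* Flood-group membership is recomputed from scratch on every join/leave;
 * rocker_group_tbl_add() turns the rewrite into a GROUP_MOD on the device
 * when a flood group for this VLAN already exists.
 */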

static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
				      enum switchdev_trans trans, int flags,
				      __be16 vlan_id, bool pop_vlan)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_port *p;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = rocker_port->pport;
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = rocker_group_l2_interface(rocker_port, trans, flags,
					vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}

static struct rocker_ctrl {
	const u8 *eth_dst;
	const u8 *eth_dst_mask;
	__be16 eth_type;
	bool acl;
	bool bridge;
	bool term;
	bool copy_to_cpu;
} rocker_ctrls[] = {
	[ROCKER_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[ROCKER_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[ROCKER_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
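
/* Each ctrl selects exactly one installer below via its flags: .acl
 * entries land in the ACL policy table, .term entries in the
 * termination-MAC table, and .bridge entries in the bridging table with a
 * per-VLAN flood group.
 */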

static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
				     enum switchdev_trans trans, int flags,
				     const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport = rocker_port->pport;
	u32 in_pport_mask = 0xffffffff;
	u32 out_pport = 0;
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = rocker_flow_tbl_acl(rocker_port, trans, flags,
				  in_pport, in_pport_mask,
				  eth_src, eth_src_mask,
				  ctrl->eth_dst, ctrl->eth_dst_mask,
				  ctrl->eth_type,
				  vlan_id, vlan_id_mask,
				  ip_proto, ip_proto_mask,
				  ip_tos, ip_tos_mask,
				  group_id);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
					enum switchdev_trans trans, int flags,
					const struct rocker_ctrl *ctrl,
					__be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
				     ctrl->eth_dst, ctrl->eth_dst_mask,
				     vlan_id, tunnel_id,
				     goto_tbl, group_id, ctrl->copy_to_cpu);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
				      enum switchdev_trans trans, int flags,
				      const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       ctrl->eth_type, ctrl->eth_dst,
				       ctrl->eth_dst_mask, vlan_id,
				       vlan_id_mask, ctrl->copy_to_cpu,
				       flags);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}

static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	if (ctrl->acl)
		return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
						 ctrl, vlan_id);
	if (ctrl->bridge)
		return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
						    ctrl, vlan_id);

	if (ctrl->term)
		return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
						  ctrl, vlan_id);

	return -EOPNOTSUPP;
}

static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
				     enum switchdev_trans trans, int flags,
				     __be16 vlan_id)
{
	int err = 0;
	int i;

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (rocker_port->ctrls[i]) {
			err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
						    &rocker_ctrls[i], vlan_id);
			if (err)
				return err;
		}
	}

	return err;
}

static int rocker_port_ctrl(struct rocker_port *rocker_port,
			    enum switchdev_trans trans, int flags,
			    const struct rocker_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
					    ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}

static int rocker_port_vlan(struct rocker_port *rocker_port,
			    enum switchdev_trans trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = rocker_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);

	if (adding && test_bit(ntohs(internal_vlan_id),
			       rocker_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding && !test_bit(ntohs(internal_vlan_id),
				      rocker_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	if (adding) {
		err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
						internal_vlan_id);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
					 internal_vlan_id, untagged);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
					   internal_vlan_id);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
				   in_pport, vlan_id, vlan_id_mask,
				   goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN table\n", err);

err_out:
	if (trans == SWITCHDEV_TRANS_PREPARE)
		change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	return err;
}
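
/* The vlan_bitmap bit is flipped before programming so the group helpers
 * above see the new membership, and flipped back unconditionally at
 * err_out when this is only the PREPARE phase of a transaction.
 */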

static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
			      enum switchdev_trans trans, int flags)
{
	enum rocker_of_dpa_table_id goto_tbl;
	u32 in_pport;
	u32 in_pport_mask;
	int err;

	/* Normal Ethernet Frames.  Matches pkts from any local physical
	 * ports.  Goto VLAN tbl.
	 */

	in_pport = 0;
	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
				      in_pport, in_pport_mask,
				      goto_tbl);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) ingress port table entry\n", err);

	return err;
}

struct rocker_fdb_learn_work {
	struct work_struct work;
	struct rocker_port *rocker_port;
	enum switchdev_trans trans;
	int flags;
	u8 addr[ETH_ALEN];
	u16 vid;
};

static void rocker_port_fdb_learn_work(struct work_struct *work)
{
	const struct rocker_fdb_learn_work *lw =
		container_of(work, struct rocker_fdb_learn_work, work);
	bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
	bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
					 lw->rocker_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
					 lw->rocker_port->dev, &info.info);

	rocker_port_kfree(lw->trans, work);
}
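
/* FDB learn events originate in the driver's event path, so the
 * SWITCHDEV_FDB_ADD/DEL notifier calls, which may involve work that
 * cannot be done in that context, are bounced to process context through
 * this work item.
 */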

static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 const u8 *addr, __be16 vlan_id)
{
	struct rocker_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = rocker_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	if (rocker_port_is_bridged(rocker_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
		err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
					     NULL, vlan_id, tunnel_id, goto_tbl,
					     group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, rocker_port_fdb_learn_work);

	lw->rocker_port = rocker_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);

	if (trans == SWITCHDEV_TRANS_PREPARE)
		rocker_port_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}

static struct rocker_fdb_tbl_entry *
rocker_fdb_tbl_find(const struct rocker *rocker,
		    const struct rocker_fdb_tbl_entry *match)
{
	struct rocker_fdb_tbl_entry *found;

	hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
			return found;

	return NULL;
}

static int rocker_port_fdb(struct rocker_port *rocker_port,
			   enum switchdev_trans trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *fdb;
	struct rocker_fdb_tbl_entry *found;
	bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.rocker_port = rocker_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	found = rocker_fdb_tbl_find(rocker, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			rocker_port_kfree(trans, fdb);
			if (trans != SWITCHDEV_TRANS_PREPARE)
				hash_del(&found->entry);
		}
	} else if (!removing) {
		if (trans != SWITCHDEV_TRANS_PREPARE)
			hash_add(rocker->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		rocker_port_kfree(trans, fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= ROCKER_OP_FLAG_REFRESH;
	}

	return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
}
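
/* "!found != !removing" catches the two mismatch cases: removing an entry
 * that was never there (returns 0 above) and re-adding one that already
 * exists, which sets ROCKER_OP_FLAG_REFRESH so rocker_port_fdb_learn()
 * skips reinstalling the bridge flow and only refreshes the learn state.
 */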

static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= ROCKER_OP_FLAG_REMOVE;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		if (!found->learned)
			continue;
		err = rocker_port_fdb_learn(rocker_port, trans, flags,
					    found->key.addr,
					    found->key.vlan_id);
		if (err)
			goto err_out;
		if (trans != SWITCHDEV_TRANS_PREPARE)
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}

static void rocker_fdb_cleanup(unsigned long data)
{
	struct rocker *rocker = (struct rocker *)data;
	struct rocker_port *rocker_port;
	struct rocker_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
		    ROCKER_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		rocker_port = entry->key.rocker_port;
		expires = entry->touched + rocker_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			rocker_port_fdb_learn(rocker_port, SWITCHDEV_TRANS_NONE,
					      flags, entry->key.addr,
					      entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
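
/* The ageing timer is self-arming: each pass expires learned entries whose
 * last-touched time has passed (NOWAIT, as this runs in timer context
 * under fdb_tbl_lock) and re-arms for the earliest remaining expiry,
 * defaulting to BR_MIN_AGEING_TIME ahead when none is sooner.
 */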

static int rocker_port_router_mac(struct rocker_port *rocker_port,
				  enum switchdev_trans trans, int flags,
				  __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 eth_type;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);
	if (err)
		return err;

	eth_type = htons(ETH_P_IPV6);
	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);

	return err;
}

static int rocker_port_fwding(struct rocker_port *rocker_port,
			      enum switchdev_trans trans, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		flags |= ROCKER_OP_FLAG_REMOVE;

	out_pport = rocker_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		pop_vlan = rocker_vlan_id_is_internal(vlan_id);
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}

static int rocker_port_stp_update(struct rocker_port *rocker_port,
				  enum switchdev_trans trans, int flags,
				  u8 state)
{
	bool want[ROCKER_CTRL_MAX] = { 0, };
	bool prev_ctrls[ROCKER_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	if (trans == SWITCHDEV_TRANS_PREPARE) {
		memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
		prev_state = rocker_port->stp_state;
	}

	if (rocker_port->stp_state == state)
		return 0;

	rocker_port->stp_state = state;

	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		want[ROCKER_CTRL_IPV4_MCAST] = true;
		want[ROCKER_CTRL_IPV6_MCAST] = true;
		if (rocker_port_is_bridged(rocker_port))
			want[ROCKER_CTRL_DFLT_BRIDGING] = true;
		else if (rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_DFLT_OVS] = true;
		else
			want[ROCKER_CTRL_LOCAL_ARP] = true;
		break;
	}

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (want[i] != rocker_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
			err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
					       &rocker_ctrls[i]);
			if (err)
				goto err_out;
			rocker_port->ctrls[i] = want[i];
		}
	}

	err = rocker_port_fdb_flush(rocker_port, trans, flags);
	if (err)
		goto err_out;

	err = rocker_port_fwding(rocker_port, trans, flags);

err_out:
	if (trans == SWITCHDEV_TRANS_PREPARE) {
		memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		rocker_port->stp_state = prev_state;
	}

	return err;
}
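
/* STP state determines which control-traffic rules a port wants:
 * blocked/listening ports still trap link-local multicast, while
 * learning/forwarding ports add the multicast ctrls plus the bridging,
 * OVS or local-ARP default.  The FDB flush above no-ops when the new
 * state is learning/forwarding, and on PREPARE the saved ctrls and state
 * are restored so the dry run leaves no trace.
 */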

static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
				  enum switchdev_trans trans, int flags)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will enable port */
		return 0;

	/* port is not bridged, so simulate going to FORWARDING state */
	return rocker_port_stp_update(rocker_port, trans, flags,
				      BR_STATE_FORWARDING);
}

static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
				   enum switchdev_trans trans, int flags)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will disable port */
		return 0;

	/* port is not bridged, so simulate going to DISABLED state */
	return rocker_port_stp_update(rocker_port, trans, flags,
				      BR_STATE_DISABLED);
}

static struct rocker_internal_vlan_tbl_entry *
rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
{
	struct rocker_internal_vlan_tbl_entry *found;

	hash_for_each_possible(rocker->internal_vlan_tbl, found,
			       entry, ifindex) {
		if (found->ifindex == ifindex)
			return found;
	}

	return NULL;
}

static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
					       int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *entry;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);

	for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}

static void
rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
				 int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (!found) {
		netdev_err(rocker_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, rocker->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}
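
/* Internal VLAN IDs give untagged traffic a VLAN context inside the
 * OF-DPA pipeline.  They are allocated from a fixed bitmap of
 * ROCKER_N_INTERNAL_VLANS above ROCKER_INTERNAL_VLAN_ID_BASE and ref
 * counted per ifindex, so callers that pass the same ifindex (e.g.,
 * presumably ports under one bridge) share a single internal VLAN ID.
 */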

static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
				enum switchdev_trans trans, __be32 dst,
				int dst_len, const struct fib_info *fi,
				u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == rocker_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = rocker_port_ipv4_nh(rocker_port, trans, flags,
					  nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
					     dst_mask, priority, goto_tbl,
					     group_id, flags);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
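
/* Only routes whose single nexthop has a gateway on this port are
 * offloaded through an L3 unicast group; gateway-less (directly
 * connected) routes and nexthops on other devices fall back to the CPU
 * via the L2 interface group on pport 0.  ECMP is not yet supported.
 */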

/*****************
 * Net device ops
 *****************/

static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
	if (err)
		goto err_fwd_enable;

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	if (!dev->proto_down)
		rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_fwd_enable:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}

static int rocker_port_stop(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	netif_stop_queue(dev);
	rocker_port_set_enable(rocker_port, false);
	napi_disable(&rocker_port->napi_rx);
	napi_disable(&rocker_port->napi_tx);
	rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE,
				ROCKER_OP_FLAG_NOWAIT);
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
	rocker_port_dma_rings_fini(rocker_port);

	return 0;
}
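
/* Stop uses ROCKER_OP_FLAG_NOWAIT for the simulated DISABLED transition,
 * presumably so the teardown path does not block waiting on command-ring
 * completions while the interface is being brought down.
 */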

static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
	struct rocker_tlv *attr;
	int rem;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_TX_FRAGS])
		return;
	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
		const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
		dma_addr_t dma_handle;
		size_t len;

		if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
			continue;
		rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
					attr);
		if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
		    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
			continue;
		dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
		len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
		pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
	}
}

static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       char *buf, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	struct rocker_tlv *frag;

	dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
		if (net_ratelimit())
			netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
	if (!frag)
		goto unmap_frag;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
			       dma_handle))
		goto nest_cancel;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
			       buf_len))
		goto nest_cancel;
	rocker_tlv_nest_end(desc_info, frag);
	return 0;

nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
	pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
	return -EMSGSIZE;
}
4172
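/* The xmit path maps the skb's linear head and each page fragment as
 * separate tx frags.  The descriptor holds at most ROCKER_TX_FRAGS_MAX
 * fragments, so skbs with more page frags than that are linearized
 * into a single buffer first.
 */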
static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_tlv *frags;
	int i;
	int err;

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (unlikely(!desc_info)) {
		if (net_ratelimit())
			netdev_err(dev, "tx ring full when queue awake\n");
		return NETDEV_TX_BUSY;
	}

	rocker_desc_cookie_ptr_set(desc_info, skb);

	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
	if (!frags)
		goto out;

	/* Linearize before mapping the head: skb_linearize() may
	 * reallocate skb->data, which would leave an already-mapped
	 * DMA address pointing at freed memory.
	 */
	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
		err = skb_linearize(skb);
		if (err)
			goto nest_cancel;
	}

	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
					  skb->data, skb_headlen(skb));
	if (err)
		goto nest_cancel;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
						  skb_frag_address(frag),
						  skb_frag_size(frag));
		if (err)
			goto unmap_frags;
	}
	rocker_tlv_nest_end(desc_info, frags);

	rocker_desc_gen_clear(desc_info);
	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (!desc_info)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;

unmap_frags:
	rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frags);
out:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}

static int rocker_port_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int running = netif_running(dev);
	int err;

#define ROCKER_PORT_MIN_MTU	68
#define ROCKER_PORT_MAX_MTU	9000

	if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
		return -EINVAL;

	if (running)
		rocker_port_stop(dev);

	netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
	if (err)
		return err;

	if (running)
		err = rocker_port_open(dev);

	return err;
}

static int rocker_port_get_phys_port_name(struct net_device *dev,
					  char *buf, size_t len)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct port_name name = { .buf = buf, .len = len };
	int err;

	err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
			      rocker_cmd_get_port_settings_prep, NULL,
			      rocker_cmd_get_port_settings_phys_name_proc,
			      &name);

	return err ? -EOPNOTSUPP : 0;
}

static int rocker_port_change_proto_down(struct net_device *dev,
					 bool proto_down)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_port->dev->flags & IFF_UP)
		rocker_port_set_enable(rocker_port, !proto_down);
	rocker_port->dev->proto_down = proto_down;
	return 0;
}

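/* Called by the core when a neighbour entry is destroyed; remove the
 * corresponding hardware L3 neighbour state.  The NOWAIT flag is used
 * here, presumably because the destructor can run in a context that
 * must not sleep on the command ring.
 */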
static void rocker_port_neigh_destroy(struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(n->dev);
	int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
			       flags, ip_addr, n->ha);
}

static const struct net_device_ops rocker_port_netdev_ops = {
	.ndo_open			= rocker_port_open,
	.ndo_stop			= rocker_port_stop,
	.ndo_start_xmit			= rocker_port_xmit,
	.ndo_set_mac_address		= rocker_port_set_mac_address,
	.ndo_change_mtu			= rocker_port_change_mtu,
	.ndo_bridge_getlink		= switchdev_port_bridge_getlink,
	.ndo_bridge_setlink		= switchdev_port_bridge_setlink,
	.ndo_bridge_dellink		= switchdev_port_bridge_dellink,
	.ndo_fdb_add			= switchdev_port_fdb_add,
	.ndo_fdb_del			= switchdev_port_fdb_del,
	.ndo_fdb_dump			= switchdev_port_fdb_dump,
	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
	.ndo_change_proto_down		= rocker_port_change_proto_down,
	.ndo_neigh_destroy		= rocker_port_neigh_destroy,
};

/********************
 * switchdev interface
 ********************/

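/* switchdev attr/obj writes arrive as two-phase transactions: a
 * PREPARE pass, during which any memory the operation needs is
 * allocated and queued on rocker_port->trans_mem, then either a
 * COMMIT pass that consumes it or an ABORT that frees everything
 * queued by PREPARE (see rocker_port_trans_abort() below).
 */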
static int rocker_port_attr_get(struct net_device *dev,
				struct switchdev_attr *attr)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	const struct rocker *rocker = rocker_port->rocker;

	switch (attr->id) {
	case SWITCHDEV_ATTR_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(rocker->hw.id);
		memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags = rocker_port->brport_flags;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
{
	struct list_head *mem, *tmp;

	list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
		list_del(mem);
		kfree(mem);
	}
}

static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
					enum switchdev_trans trans,
					unsigned long brport_flags)
{
	unsigned long orig_flags;
	int err = 0;

	orig_flags = rocker_port->brport_flags;
	rocker_port->brport_flags = brport_flags;
	if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
		err = rocker_port_set_learning(rocker_port, trans);

	if (trans == SWITCHDEV_TRANS_PREPARE)
		rocker_port->brport_flags = orig_flags;

	return err;
}

static int rocker_port_attr_set(struct net_device *dev,
				struct switchdev_attr *attr)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (attr->trans) {
	case SWITCHDEV_TRANS_PREPARE:
		BUG_ON(!list_empty(&rocker_port->trans_mem));
		break;
	case SWITCHDEV_TRANS_ABORT:
		rocker_port_trans_abort(rocker_port);
		return 0;
	default:
		break;
	}

	switch (attr->id) {
	case SWITCHDEV_ATTR_PORT_STP_STATE:
		err = rocker_port_stp_update(rocker_port, attr->trans,
					     ROCKER_OP_FLAG_NOWAIT,
					     attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
		err = rocker_port_brport_flags_set(rocker_port, attr->trans,
						   attr->u.brport_flags);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

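/* Adding a VLAN to a port installs both a VLAN table entry and a
 * router MAC entry for that VLAN; if the second step fails, the
 * first is rolled back so the two tables stay in sync.
 */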
static int rocker_port_vlan_add(struct rocker_port *rocker_port,
				enum switchdev_trans trans, u16 vid, u16 flags)
{
	int err;

	/* XXX deal with flags for PVID and untagged */

	err = rocker_port_vlan(rocker_port, trans, 0, vid);
	if (err)
		return err;

	err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
	if (err)
		rocker_port_vlan(rocker_port, trans,
				 ROCKER_OP_FLAG_REMOVE, vid);

	return err;
}

static int rocker_port_vlans_add(struct rocker_port *rocker_port,
				 enum switchdev_trans trans,
				 const struct switchdev_obj_vlan *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = rocker_port_vlan_add(rocker_port, trans,
					   vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}

static int rocker_port_fdb_add(struct rocker_port *rocker_port,
			       enum switchdev_trans trans,
			       const struct switchdev_obj_fdb *fdb)
{
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
	int flags = 0;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}

static int rocker_port_obj_add(struct net_device *dev,
			       struct switchdev_obj *obj)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	const struct switchdev_obj_ipv4_fib *fib4;
	int err = 0;

	switch (obj->trans) {
	case SWITCHDEV_TRANS_PREPARE:
		BUG_ON(!list_empty(&rocker_port->trans_mem));
		break;
	case SWITCHDEV_TRANS_ABORT:
		rocker_port_trans_abort(rocker_port);
		return 0;
	default:
		break;
	}

	switch (obj->id) {
	case SWITCHDEV_OBJ_PORT_VLAN:
		err = rocker_port_vlans_add(rocker_port, obj->trans,
					    &obj->u.vlan);
		break;
	case SWITCHDEV_OBJ_IPV4_FIB:
		fib4 = &obj->u.ipv4_fib;
		err = rocker_port_fib_ipv4(rocker_port, obj->trans,
					   htonl(fib4->dst), fib4->dst_len,
					   fib4->fi, fib4->tb_id, 0);
		break;
	case SWITCHDEV_OBJ_PORT_FDB:
		err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int rocker_port_vlan_del(struct rocker_port *rocker_port,
				u16 vid, u16 flags)
{
	int err;

	err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
				     ROCKER_OP_FLAG_REMOVE, htons(vid));
	if (err)
		return err;

	return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
				ROCKER_OP_FLAG_REMOVE, vid);
}

static int rocker_port_vlans_del(struct rocker_port *rocker_port,
				 const struct switchdev_obj_vlan *vlan)
{
	u16 vid;
	int err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
		if (err)
			return err;
	}

	return 0;
}

static int rocker_port_fdb_del(struct rocker_port *rocker_port,
			       enum switchdev_trans trans,
			       const struct switchdev_obj_fdb *fdb)
{
	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;

	if (!rocker_port_is_bridged(rocker_port))
		return -EINVAL;

	return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}

static int rocker_port_obj_del(struct net_device *dev,
			       struct switchdev_obj *obj)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	const struct switchdev_obj_ipv4_fib *fib4;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_PORT_VLAN:
		err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
		break;
	case SWITCHDEV_OBJ_IPV4_FIB:
		fib4 = &obj->u.ipv4_fib;
		err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
					   htonl(fib4->dst), fib4->dst_len,
					   fib4->fi, fib4->tb_id,
					   ROCKER_OP_FLAG_REMOVE);
		break;
	case SWITCHDEV_OBJ_PORT_FDB:
		err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

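/* The dump helpers below walk the driver's software state (the FDB
 * hash table, the port's VLAN bitmap) and invoke the switchdev
 * callback once per entry; a non-zero return from the callback
 * terminates the walk.
 */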
static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
				struct switchdev_obj *obj)
{
	struct rocker *rocker = rocker_port->rocker;
	struct switchdev_obj_fdb *fdb = &obj->u.fdb;
	struct rocker_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	unsigned long lock_flags;
	int bkt;
	int err = 0;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		fdb->addr = found->key.addr;
		fdb->ndm_state = NUD_REACHABLE;
		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
						   found->key.vlan_id);
		err = obj->cb(rocker_port->dev, obj);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}

static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
				 struct switchdev_obj *obj)
{
	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan->flags = 0;
		if (rocker_vlan_id_is_internal(htons(vid)))
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vlan->vid_end = vid;
		err = obj->cb(rocker_port->dev, obj);
		if (err)
			break;
	}

	return err;
}

static int rocker_port_obj_dump(struct net_device *dev,
				struct switchdev_obj *obj)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_PORT_FDB:
		err = rocker_port_fdb_dump(rocker_port, obj);
		break;
	case SWITCHDEV_OBJ_PORT_VLAN:
		err = rocker_port_vlan_dump(rocker_port, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops rocker_port_switchdev_ops = {
	.switchdev_port_attr_get	= rocker_port_attr_get,
	.switchdev_port_attr_set	= rocker_port_attr_set,
	.switchdev_port_obj_add		= rocker_port_obj_add,
	.switchdev_port_obj_del		= rocker_port_obj_del,
	.switchdev_port_obj_dump	= rocker_port_obj_dump,
};

/********************
 * ethtool interface
 ********************/

static int rocker_port_get_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}

static int rocker_port_set_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}

static void rocker_port_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static struct rocker_port_stats {
	char str[ETH_GSTRING_LEN];
	int type;
} rocker_port_stats[] = {
	{ "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
	{ "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
	{ "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
	{ "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },

	{ "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
	{ "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
	{ "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
	{ "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
};

#define ROCKER_PORT_STATS_LEN	ARRAY_SIZE(rocker_port_stats)

static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
				    u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
			memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

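/* Port stats are fetched over the command ring as a request/response
 * pair: the prep callback builds a GET_PORT_STATS request keyed by
 * pport, and the proc callback parses the response TLVs into a u64
 * array laid out in the same order as rocker_port_stats[] above.
 */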
static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv)
{
	struct rocker_tlv *cmd_stats;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_stats)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, cmd_stats);

	return 0;
}

static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	const struct rocker_tlv *pattr;
	u32 pport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);

	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
		return -EIO;

	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
	if (pport != rocker_port->pport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;

		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}

static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv);
}

static void rocker_port_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
		int i;

		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
			data[i] = 0;
	}
}

static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ROCKER_PORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_settings		= rocker_port_get_settings,
	.set_settings		= rocker_port_set_settings,
	.get_drvinfo		= rocker_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= rocker_port_get_strings,
	.get_ethtool_stats	= rocker_port_get_stats,
	.get_sset_count		= rocker_port_get_sset_count,
};

/*****************
 * NAPI interface
 *****************/

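/* Both poll loops run on a credit model: every descriptor taken off
 * the ring tail earns one credit, and the accumulated credits are
 * returned to the hardware in a single write at the end of the poll,
 * letting the device reuse those descriptor slots.
 */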
static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}

static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}

static int rocker_port_rx_proc(const struct rocker *rocker,
			       const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;
	u16 rx_flags = 0;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;
	if (attrs[ROCKER_TLV_RX_FLAGS])
		rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
		skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}

static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_rx);
}

static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	if (credits < budget)
		napi_complete(napi);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}

/*****************
 * PCI driver ops
 *****************/

static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
	bool link_up;

	link_up = link_status & (1 << rocker_port->pport);
	if (link_up)
		netif_carrier_on(rocker_port->dev);
	else
		netif_carrier_off(rocker_port->dev);
}

static void rocker_remove_ports(const struct rocker *rocker)
{
	struct rocker_port *rocker_port;
	int i;

	for (i = 0; i < rocker->port_count; i++) {
		rocker_port = rocker->ports[i];
		if (!rocker_port)
			continue;
		rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
				   ROCKER_OP_FLAG_REMOVE);
		unregister_netdev(rocker_port->dev);
		free_netdev(rocker_port->dev);
	}
	kfree(rocker->ports);
}

static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
						   rocker_port->dev->dev_addr);
	if (err) {
		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
		eth_hw_addr_random(rocker_port->dev);
	}
}

static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u16 untagged_vid = 0;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;
	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
	rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
	INIT_LIST_HEAD(&rocker_port->trans_mem);

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	dev->switchdev_ops = &rocker_port_switchdev_ops;
	netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
		       NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);

	rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);

	err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install ig port table failed\n");
		goto err_port_ig_tbl;
	}

	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);

	err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
				   untagged_vid, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}

	return 0;

err_untagged_vlan:
	rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
			   ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
	rocker->ports[port_number] = NULL;
	unregister_netdev(dev);
err_register_netdev:
	free_netdev(dev);
	return err;
}

static int rocker_probe_ports(struct rocker *rocker)
{
	int i;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
	rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
	for (i = 0; i < rocker->port_count; i++) {
		err = rocker_probe_port(rocker, i);
		if (err)
			goto remove_ports;
	}
	return 0;

remove_ports:
	rocker_remove_ports(rocker);
	return err;
}

static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}

static void rocker_msix_fini(const struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}

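/* Probe brings the switch up in dependency order: PCI device, BAR0
 * mapping, MSI-X vectors, a basic hardware self-test, DMA rings, cmd
 * and event IRQs, software tables, the FDB cleanup timer, and finally
 * the per-port netdevs.  The error path tears down in reverse.
 */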
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	err = rocker_init_tbls(rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot init rocker tables\n");
		goto err_init_tbls;
	}

	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
		    (unsigned long) rocker);
	mod_timer(&rocker->fdb_cleanup_timer, jiffies);

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
		 (int)sizeof(rocker->hw.id), &rocker->hw.id);

	return 0;

err_probe_ports:
	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
err_init_tbls:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}

static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);

	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	rocker_remove_ports(rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}

static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};

/************************************
 * Net device notifier event handler
 ************************************/

static bool rocker_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}

static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	u16 untagged_vid = 0;
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

	rocker_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);

	return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
				    untagged_vid, 0);
}

static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	u16 untagged_vid = 0;
	int err;

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);

	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
				    false);
	rocker_port->bridge_dev = NULL;

	err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
				   untagged_vid, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port,
					     SWITCHDEV_TRANS_NONE, 0);

	return err;
}

static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
				   struct net_device *master)
{
	int err;

	rocker_port->bridge_dev = master;

	err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
	if (err)
		return err;
	err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);

	return err;
}

static int rocker_port_master_linked(struct rocker_port *rocker_port,
				     struct net_device *master)
{
	int err = 0;

	if (netif_is_bridge_master(master))
		err = rocker_port_bridge_join(rocker_port, master);
	else if (netif_is_ovs_master(master))
		err = rocker_port_ovs_changed(rocker_port, master);
	return err;
}

static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
{
	int err = 0;

	if (rocker_port_is_bridged(rocker_port))
		err = rocker_port_bridge_leave(rocker_port);
	else if (rocker_port_is_ovsed(rocker_port))
		err = rocker_port_ovs_changed(rocker_port, NULL);
	return err;
}

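/* Only NETDEV_CHANGEUPPER events on rocker ports are of interest:
 * linking to a bridge or OVS master reprograms the port's forwarding
 * state, unlinking restores the standalone configuration.  Errors
 * are logged but not propagated, as the notifier runs after the
 * topology change has already happened.
 */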
static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct rocker_port *rocker_port;
	int err;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;
		if (!info->master)
			goto out;
		rocker_port = netdev_priv(dev);
		if (info->linking) {
			err = rocker_port_master_linked(rocker_port,
							info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
		} else {
			err = rocker_port_master_unlinked(rocker_port);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
		}
		break;
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};

/************************************
 * Net event notifier event handler
 ************************************/

static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
		    ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
				      flags, ip_addr, n->ha);
}

static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		err = rocker_neigh_update(dev, n);
		if (err)
			netdev_warn(dev,
				    "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};

/***********************
 * Module init and exit
 ***********************/

static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}

static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}

module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);