/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};

struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};

struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};
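
/* Flow table entries are hashed on key_crc32; key_len records how many
 * bytes of the key union are significant for the entry's table type, so
 * only that prefix of the key takes part in hashing and match comparison.
 */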

struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};

struct rocker_desc_info {
	char *data; /* mapped */
	size_t data_size;
	size_t tlv_size;
	struct rocker_desc *desc;
	dma_addr_t mapaddr;
};

struct rocker_dma_ring_info {
	size_t size;
	u32 head;
	u32 tail;
	struct rocker_desc *desc; /* mapped */
	dma_addr_t mapaddr;
	struct rocker_desc_info *desc_info;
	unsigned int type;
};

struct rocker;

enum {
	ROCKER_CTRL_LINK_LOCAL_MCAST,
	ROCKER_CTRL_LOCAL_ARP,
	ROCKER_CTRL_IPV4_MCAST,
	ROCKER_CTRL_IPV6_MCAST,
	ROCKER_CTRL_DFLT_BRIDGING,
	ROCKER_CTRL_DFLT_OVS,
	ROCKER_CTRL_MAX,
};

#define ROCKER_INTERNAL_VLAN_ID_BASE	0x0f00
#define ROCKER_N_INTERNAL_VLANS		255
#define ROCKER_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
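
/* Internal VLAN IDs 0x0f00-0x0ffe stand in for untagged traffic inside the
 * driver; rocker_vlan_id_is_internal() below tests exactly this range, and
 * rocker_port_vid_to_vlan() substitutes a port's internal VLAN for VID 0.
 */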

struct rocker_port {
	struct net_device *dev;
	struct net_device *bridge_dev;
	struct rocker *rocker;
	unsigned int port_number;
	u32 pport;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	bool ctrls[ROCKER_CTRL_MAX];
	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
	struct napi_struct napi_tx;
	struct napi_struct napi_rx;
	struct rocker_dma_ring_info tx_ring;
	struct rocker_dma_ring_info rx_ring;
	struct list_head trans_mem;
};

struct rocker {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct msix_entry *msix_entries;
	unsigned int port_count;
	struct rocker_port **ports;
	struct {
		u64 id;
	} hw;
	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
	struct rocker_dma_ring_info cmd_ring;
	struct rocker_dma_ring_info event_ring;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_ovs_master(rocker_port->bridge_dev);
}

#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)
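
/* Flags modifying table operations: ROCKER_OP_FLAG_NOWAIT makes a command
 * fire-and-forget and switches allocations to GFP_ATOMIC (see
 * __rocker_port_mem_alloc() below); ROCKER_OP_FLAG_LEARNED marks FDB
 * updates that originate from hardware learning events.
 */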

static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
				     enum switchdev_trans trans, int flags,
				     size_t size)
{
	struct list_head *elem = NULL;
	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a per-port list.  If in transaction
	 * commit phase, dequeue the memory from the per-port list
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	switch (trans) {
	case SWITCHDEV_TRANS_PREPARE:
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		list_add_tail(elem, &rocker_port->trans_mem);
		break;
	case SWITCHDEV_TRANS_COMMIT:
		BUG_ON(list_empty(&rocker_port->trans_mem));
		elem = rocker_port->trans_mem.next;
		list_del_init(elem);
		break;
	case SWITCHDEV_TRANS_NONE:
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (elem)
			INIT_LIST_HEAD(elem);
		break;
	default:
		break;
	}

	return elem ? elem + 1 : NULL;
}
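
/* Memory handed out above is preceded by a hidden struct list_head, which
 * is why callers see "elem + 1"; rocker_port_kfree() steps back over that
 * header to recover the list node and the original allocation.
 */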

static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
}

static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
				 enum switchdev_trans trans, int flags,
				 size_t n, size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
}

static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
{
	struct list_head *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (trans == SWITCHDEV_TRANS_PREPARE)
		return;

	elem = (struct list_head *)mem - 1;
	BUG_ON(!list_empty(elem));
	kfree(elem);
}

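/* A rocker_wait tracks one outstanding command descriptor: the submitter
 * sleeps on it and the cmd ring IRQ handler wakes it, except for nowait
 * commands, whose wait object the IRQ handler frees directly (see
 * rocker_cmd_irq_handler()).
 */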
struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
					      enum switchdev_trans trans,
					      int flags)
{
	struct rocker_wait *wait;

	wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
	if (!wait)
		return NULL;
	rocker_wait_init(wait);
	return wait;
}

static void rocker_wait_destroy(enum switchdev_trans trans,
				struct rocker_wait *wait)
{
	rocker_port_kfree(trans, wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, HZ / 10);
	if (!wait->done)
		return false;
	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}

static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
{
	const struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}

/******
 * TLV
 ******/

#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))

/*  <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 * |             Header          | Pad |           Payload           | Pad |
 * |      (struct rocker_tlv)    | ing |                             | ing |
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 *  <--------------------------- tlv->len -------------------------->
 */
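
/* The helpers below mirror the kernel's netlink attribute (nla_*) API,
 * operating on TLVs inside the DMA descriptor buffers shared with the
 * device.
 */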

static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
					  int *remaining)
{
	int totlen = ROCKER_TLV_ALIGN(tlv->len);

	*remaining -= totlen;
	return (struct rocker_tlv *) ((char *) tlv + totlen);
}

static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
{
	return remaining >= (int) ROCKER_TLV_HDRLEN &&
	       tlv->len >= ROCKER_TLV_HDRLEN &&
	       tlv->len <= remaining;
}

#define rocker_tlv_for_each(pos, head, len, rem)	\
	for (pos = head, rem = len;			\
	     rocker_tlv_ok(pos, rem);			\
	     pos = rocker_tlv_next(pos, &(rem)))

#define rocker_tlv_for_each_nested(pos, tlv, rem)	\
	rocker_tlv_for_each(pos, rocker_tlv_data(tlv),	\
			    rocker_tlv_len(tlv), rem)

static int rocker_tlv_attr_size(int payload)
{
	return ROCKER_TLV_HDRLEN + payload;
}

static int rocker_tlv_total_size(int payload)
{
	return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}

static int rocker_tlv_padlen(int payload)
{
	return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}
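
/* attr_size is header plus payload; total_size rounds that up to the
 * 8-byte TLV alignment, and padlen is the zero-filled difference between
 * the two (see the memset in rocker_tlv_put()).
 */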

static int rocker_tlv_type(const struct rocker_tlv *tlv)
{
	return tlv->type;
}

static void *rocker_tlv_data(const struct rocker_tlv *tlv)
{
	return (char *) tlv + ROCKER_TLV_HDRLEN;
}

static int rocker_tlv_len(const struct rocker_tlv *tlv)
{
	return tlv->len - ROCKER_TLV_HDRLEN;
}

static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
{
	return *(u8 *) rocker_tlv_data(tlv);
}

static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
{
	return *(u16 *) rocker_tlv_data(tlv);
}

static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
{
	return *(__be16 *) rocker_tlv_data(tlv);
}

static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
{
	return *(u32 *) rocker_tlv_data(tlv);
}

static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
{
	return *(u64 *) rocker_tlv_data(tlv);
}

static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
			     const char *buf, int buf_len)
{
	const struct rocker_tlv *tlv;
	const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
	int rem;

	memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));

	rocker_tlv_for_each(tlv, head, buf_len, rem) {
		u32 type = rocker_tlv_type(tlv);

		if (type > 0 && type <= maxtype)
			tb[type] = tlv;
	}
}

static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
				    const struct rocker_tlv *tlv)
{
	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
			 rocker_tlv_len(tlv));
}

static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
				  const struct rocker_desc_info *desc_info)
{
	rocker_tlv_parse(tb, maxtype, desc_info->data,
			 desc_info->desc->tlv_size);
}

static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
{
	return (struct rocker_tlv *) ((char *) desc_info->data +
				      desc_info->tlv_size);
}

static int rocker_tlv_put(struct rocker_desc_info *desc_info,
			  int attrtype, int attrlen, const void *data)
{
	int tail_room = desc_info->data_size - desc_info->tlv_size;
	int total_size = rocker_tlv_total_size(attrlen);
	struct rocker_tlv *tlv;

	if (unlikely(tail_room < total_size))
		return -EMSGSIZE;

	tlv = rocker_tlv_start(desc_info);
	desc_info->tlv_size += total_size;
	tlv->type = attrtype;
	tlv->len = rocker_tlv_attr_size(attrlen);
	memcpy(rocker_tlv_data(tlv), data, attrlen);
	memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
	return 0;
}

static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
			     int attrtype, u8 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
}

static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
			      int attrtype, u16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
}

static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
			       int attrtype, __be16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
}

static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
			      int attrtype, u32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
}

static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
			       int attrtype, __be32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
}

static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
			      int attrtype, u64 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
}

static struct rocker_tlv *
rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
{
	struct rocker_tlv *start = rocker_tlv_start(desc_info);

	if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
		return NULL;

	return start;
}

static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
				struct rocker_tlv *start)
{
	start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
}

static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
				   const struct rocker_tlv *start)
{
	desc_info->tlv_size = (const char *) start - desc_info->data;
}
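
/* Typical construction of a nested command message, the pattern used by
 * the cmd prepare callbacks later in this file (sketch only):
 *
 *	struct rocker_tlv *cmd_info;
 *
 *	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, cmd))
 *		return -EMSGSIZE;
 *	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 *	if (!cmd_info)
 *		return -EMSGSIZE;
 *	... rocker_tlv_put_*() attribute puts ...
 *	rocker_tlv_nest_end(desc_info, cmd_info);
 */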

/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}

static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

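/* Each ring keeps one descriptor slot unused so that a full ring
 * (head + 1 == tail) can be distinguished from an empty one
 * (head == tail); see the "ring full" check below.
 */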
static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
	static struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	static struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}

static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}

static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}

static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
					 const struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}

static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}

static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(const struct rocker_port *rocker_port,
				   bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}
1395
1396/********************************
1397 * Interrupt handler and helpers
1398 ********************************/
1399
1400static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1401{
1402 struct rocker *rocker = dev_id;
Simon Hormane5054642015-05-25 14:28:36 +09001403 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001404 struct rocker_wait *wait;
1405 u32 credits = 0;
1406
1407 spin_lock(&rocker->cmd_ring_lock);
1408 while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1409 wait = rocker_desc_cookie_ptr_get(desc_info);
Scott Feldman179f9a22015-06-12 21:35:46 -07001410 if (wait->nowait) {
1411 rocker_desc_gen_clear(desc_info);
1412 rocker_wait_destroy(SWITCHDEV_TRANS_NONE, wait);
1413 } else {
1414 rocker_wait_wake_up(wait);
1415 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001416 credits++;
1417 }
1418 spin_unlock(&rocker->cmd_ring_lock);
1419 rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1420
1421 return IRQ_HANDLED;
1422}
1423
Simon Hormane5054642015-05-25 14:28:36 +09001424static void rocker_port_link_up(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001425{
1426 netif_carrier_on(rocker_port->dev);
1427 netdev_info(rocker_port->dev, "Link is up\n");
1428}
1429
Simon Hormane5054642015-05-25 14:28:36 +09001430static void rocker_port_link_down(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001431{
1432 netif_carrier_off(rocker_port->dev);
1433 netdev_info(rocker_port->dev, "Link is down\n");
1434}
1435
Simon Hormane5054642015-05-25 14:28:36 +09001436static int rocker_event_link_change(const struct rocker *rocker,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001437 const struct rocker_tlv *info)
1438{
Simon Hormane5054642015-05-25 14:28:36 +09001439 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001440 unsigned int port_number;
1441 bool link_up;
1442 struct rocker_port *rocker_port;
1443
1444 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001445 if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001446 !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1447 return -EIO;
1448 port_number =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001449 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001450 link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1451
1452 if (port_number >= rocker->port_count)
1453 return -EINVAL;
1454
1455 rocker_port = rocker->ports[port_number];
1456 if (netif_carrier_ok(rocker_port->dev) != link_up) {
1457 if (link_up)
1458 rocker_port_link_up(rocker_port);
1459 else
1460 rocker_port_link_down(rocker_port);
1461 }
1462
1463 return 0;
1464}
1465
Scott Feldman6c707942014-11-28 14:34:28 +01001466static int rocker_port_fdb(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001467 enum switchdev_trans trans,
Scott Feldman6c707942014-11-28 14:34:28 +01001468 const unsigned char *addr,
1469 __be16 vlan_id, int flags);
1470
Simon Hormane5054642015-05-25 14:28:36 +09001471static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
Scott Feldman6c707942014-11-28 14:34:28 +01001472 const struct rocker_tlv *info)
1473{
Simon Hormane5054642015-05-25 14:28:36 +09001474 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
Scott Feldman6c707942014-11-28 14:34:28 +01001475 unsigned int port_number;
1476 struct rocker_port *rocker_port;
Simon Hormane5054642015-05-25 14:28:36 +09001477 const unsigned char *addr;
Scott Feldman92014b92015-06-12 21:35:49 -07001478 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
Scott Feldman6c707942014-11-28 14:34:28 +01001479 __be16 vlan_id;
1480
1481 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001482 if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
Scott Feldman6c707942014-11-28 14:34:28 +01001483 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1484 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1485 return -EIO;
1486 port_number =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001487 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
Scott Feldman6c707942014-11-28 14:34:28 +01001488 addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
Jiri Pirko9b03c712014-12-03 14:14:53 +01001489 vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
Scott Feldman6c707942014-11-28 14:34:28 +01001490
1491 if (port_number >= rocker->port_count)
1492 return -EINVAL;
1493
1494 rocker_port = rocker->ports[port_number];
1495
1496 if (rocker_port->stp_state != BR_STATE_LEARNING &&
1497 rocker_port->stp_state != BR_STATE_FORWARDING)
1498 return 0;
1499
Scott Feldman92014b92015-06-12 21:35:49 -07001500 return rocker_port_fdb(rocker_port, SWITCHDEV_TRANS_NONE,
1501 addr, vlan_id, flags);
Scott Feldman6c707942014-11-28 14:34:28 +01001502}
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001503
Simon Hormane5054642015-05-25 14:28:36 +09001504static int rocker_event_process(const struct rocker *rocker,
1505 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001506{
Simon Hormane5054642015-05-25 14:28:36 +09001507 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1508 const struct rocker_tlv *info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001509 u16 type;
1510
1511 rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1512 if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1513 !attrs[ROCKER_TLV_EVENT_INFO])
1514 return -EIO;
1515
1516 type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1517 info = attrs[ROCKER_TLV_EVENT_INFO];
1518
1519 switch (type) {
1520 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1521 return rocker_event_link_change(rocker, info);
Scott Feldman6c707942014-11-28 14:34:28 +01001522 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1523 return rocker_event_mac_vlan_seen(rocker, info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001524 }
1525
1526 return -EOPNOTSUPP;
1527}
1528
1529static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1530{
1531 struct rocker *rocker = dev_id;
Simon Hormane5054642015-05-25 14:28:36 +09001532 const struct pci_dev *pdev = rocker->pdev;
1533 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001534 u32 credits = 0;
1535 int err;
1536
1537 while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1538 err = rocker_desc_err(desc_info);
1539 if (err) {
1540 dev_err(&pdev->dev, "event desc received with err %d\n",
1541 err);
1542 } else {
1543 err = rocker_event_process(rocker, desc_info);
1544 if (err)
1545 dev_err(&pdev->dev, "event processing failed with err %d\n",
1546 err);
1547 }
1548 rocker_desc_gen_clear(desc_info);
1549 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1550 credits++;
1551 }
1552 rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1553
1554 return IRQ_HANDLED;
1555}
1556
1557static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1558{
1559 struct rocker_port *rocker_port = dev_id;
1560
1561 napi_schedule(&rocker_port->napi_tx);
1562 return IRQ_HANDLED;
1563}
1564
1565static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1566{
1567 struct rocker_port *rocker_port = dev_id;
1568
1569 napi_schedule(&rocker_port->napi_rx);
1570 return IRQ_HANDLED;
1571}
1572
1573/********************
1574 * Command interface
1575 ********************/
1576
Simon Horman534ba6a2015-06-01 13:25:04 +09001577typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001578 struct rocker_desc_info *desc_info,
1579 void *priv);
1580
Simon Horman534ba6a2015-06-01 13:25:04 +09001581typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001582 const struct rocker_desc_info *desc_info,
1583 void *priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001584
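/* rocker_cmd_exec() drives a single command descriptor through the cmd
 * ring: reserve a descriptor under cmd_ring_lock, let prepare() fill in
 * the command TLVs, attach a rocker_wait as the descriptor cookie and
 * post it.  ROCKER_OP_FLAG_NOWAIT callers return immediately; their
 * wait (marked via wait->nowait) is left for the completion side to
 * clean up.  Everyone else sleeps up to HZ / 10 for the completion IRQ
 * and then runs process() on the response TLVs.  During
 * SWITCHDEV_TRANS_PREPARE the descriptor is prepared but never posted,
 * so the commit phase can replay the operation for real.  On timeout
 * the wait is left allocated, since a late completion may still
 * reference it.
 */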
Simon Horman534ba6a2015-06-01 13:25:04 +09001585static int rocker_cmd_exec(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07001586 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09001587 rocker_cmd_prep_cb_t prepare, void *prepare_priv,
1588 rocker_cmd_proc_cb_t process, void *process_priv)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001589{
Simon Horman534ba6a2015-06-01 13:25:04 +09001590 struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001591 struct rocker_desc_info *desc_info;
1592 struct rocker_wait *wait;
Scott Feldman179f9a22015-06-12 21:35:46 -07001593 bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
1594 unsigned long lock_flags;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001595 int err;
1596
Scott Feldman179f9a22015-06-12 21:35:46 -07001597 wait = rocker_wait_create(rocker_port, trans, flags);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001598 if (!wait)
1599 return -ENOMEM;
Scott Feldman179f9a22015-06-12 21:35:46 -07001600 wait->nowait = nowait;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001601
Scott Feldman179f9a22015-06-12 21:35:46 -07001602 spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
Scott Feldmanc4f20322015-05-10 09:47:50 -07001603
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001604 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1605 if (!desc_info) {
Scott Feldman179f9a22015-06-12 21:35:46 -07001606 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001607 err = -EAGAIN;
1608 goto out;
1609 }
Scott Feldmanc4f20322015-05-10 09:47:50 -07001610
Simon Horman534ba6a2015-06-01 13:25:04 +09001611 err = prepare(rocker_port, desc_info, prepare_priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001612 if (err) {
Scott Feldman179f9a22015-06-12 21:35:46 -07001613 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001614 goto out;
1615 }
Scott Feldmanc4f20322015-05-10 09:47:50 -07001616
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001617 rocker_desc_cookie_ptr_set(desc_info, wait);
Scott Feldmanc4f20322015-05-10 09:47:50 -07001618
1619 if (trans != SWITCHDEV_TRANS_PREPARE)
1620 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1621
Scott Feldman179f9a22015-06-12 21:35:46 -07001622 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1623
1624 if (nowait)
1625 return 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001626
Scott Feldmanc4f20322015-05-10 09:47:50 -07001627 if (trans != SWITCHDEV_TRANS_PREPARE)
1628 if (!rocker_wait_event_timeout(wait, HZ / 10))
1629 return -EIO;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001630
1631	err = rocker_desc_err(desc_info);
1632	if (err)
1633		goto out;	/* free the wait on this error path too */
1634
1635 if (process)
Simon Horman534ba6a2015-06-01 13:25:04 +09001636 err = process(rocker_port, desc_info, process_priv);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001637
1638 rocker_desc_gen_clear(desc_info);
1639out:
Simon Horman0985df72015-05-25 14:28:35 +09001640 rocker_wait_destroy(trans, wait);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001641 return err;
1642}
1643
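/* A get-port-settings request ends up encoded as:
 *
 *	ROCKER_TLV_CMD_TYPE (u16) = ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS
 *	ROCKER_TLV_CMD_INFO (nest)
 *		ROCKER_TLV_CMD_PORT_SETTINGS_PPORT (u32) = rocker_port->pport
 *
 * The _proc handlers further below parse the same CMD_INFO nest out of
 * the device's response.
 */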
1644static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001645rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001646 struct rocker_desc_info *desc_info,
1647 void *priv)
1648{
1649 struct rocker_tlv *cmd_info;
1650
1651 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1652 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1653 return -EMSGSIZE;
1654 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1655 if (!cmd_info)
1656 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001657 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1658 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001659 return -EMSGSIZE;
1660 rocker_tlv_nest_end(desc_info, cmd_info);
1661 return 0;
1662}
1663
1664static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001665rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001666 const struct rocker_desc_info *desc_info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001667 void *priv)
1668{
1669 struct ethtool_cmd *ecmd = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001670 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1671 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001672 u32 speed;
1673 u8 duplex;
1674 u8 autoneg;
1675
1676 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1677 if (!attrs[ROCKER_TLV_CMD_INFO])
1678 return -EIO;
1679
1680 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1681 attrs[ROCKER_TLV_CMD_INFO]);
1682 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1683 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1684 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1685 return -EIO;
1686
1687 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1688 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1689 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1690
1691 ecmd->transceiver = XCVR_INTERNAL;
1692 ecmd->supported = SUPPORTED_TP;
1693 ecmd->phy_address = 0xff;
1694 ecmd->port = PORT_TP;
1695 ethtool_cmd_speed_set(ecmd, speed);
1696 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1697 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1698
1699 return 0;
1700}
1701
1702static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001703rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001704 const struct rocker_desc_info *desc_info,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001705 void *priv)
1706{
1707 unsigned char *macaddr = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001708 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1709 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1710 const struct rocker_tlv *attr;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001711
1712 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1713 if (!attrs[ROCKER_TLV_CMD_INFO])
1714 return -EIO;
1715
1716 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1717 attrs[ROCKER_TLV_CMD_INFO]);
1718 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1719 if (!attr)
1720 return -EIO;
1721
1722 if (rocker_tlv_len(attr) != ETH_ALEN)
1723 return -EINVAL;
1724
1725 ether_addr_copy(macaddr, rocker_tlv_data(attr));
1726 return 0;
1727}
1728
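/* Scratch buffer for the physical port name.  buf must be at least len
 * bytes; the parser below clamps the copy so a terminating NUL always
 * fits.
 */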
David Aherndb191702015-03-17 20:23:16 -06001729struct port_name {
1730 char *buf;
1731 size_t len;
1732};
1733
1734static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001735rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09001736 const struct rocker_desc_info *desc_info,
David Aherndb191702015-03-17 20:23:16 -06001737 void *priv)
1738{
Simon Hormane5054642015-05-25 14:28:36 +09001739 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1740 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
David Aherndb191702015-03-17 20:23:16 -06001741 struct port_name *name = priv;
Simon Hormane5054642015-05-25 14:28:36 +09001742 const struct rocker_tlv *attr;
David Aherndb191702015-03-17 20:23:16 -06001743 size_t i, j, len;
Simon Hormane5054642015-05-25 14:28:36 +09001744 const char *str;
David Aherndb191702015-03-17 20:23:16 -06001745
1746 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1747 if (!attrs[ROCKER_TLV_CMD_INFO])
1748 return -EIO;
1749
1750 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1751 attrs[ROCKER_TLV_CMD_INFO]);
1752 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1753 if (!attr)
1754 return -EIO;
1755
1756	len = min_t(size_t, rocker_tlv_len(attr), name->len - 1);
1757 str = rocker_tlv_data(attr);
1758
1759 /* make sure name only contains alphanumeric characters */
1760 for (i = j = 0; i < len; ++i) {
1761 if (isalnum(str[i])) {
1762 name->buf[j] = str[i];
1763 j++;
1764 }
1765 }
1766
1767 if (j == 0)
1768 return -EIO;
1769
1770 name->buf[j] = '\0';
1771
1772 return 0;
1773}
1774
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001775static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001776rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001777 struct rocker_desc_info *desc_info,
1778 void *priv)
1779{
1780 struct ethtool_cmd *ecmd = priv;
1781 struct rocker_tlv *cmd_info;
1782
1783 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1784 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1785 return -EMSGSIZE;
1786 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1787 if (!cmd_info)
1788 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001789 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1790 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001791 return -EMSGSIZE;
1792 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1793 ethtool_cmd_speed(ecmd)))
1794 return -EMSGSIZE;
1795 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1796 ecmd->duplex))
1797 return -EMSGSIZE;
1798 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1799 ecmd->autoneg))
1800 return -EMSGSIZE;
1801 rocker_tlv_nest_end(desc_info, cmd_info);
1802 return 0;
1803}
1804
1805static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001806rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001807 struct rocker_desc_info *desc_info,
1808 void *priv)
1809{
Simon Hormane5054642015-05-25 14:28:36 +09001810 const unsigned char *macaddr = priv;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001811 struct rocker_tlv *cmd_info;
1812
1813 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1814 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1815 return -EMSGSIZE;
1816 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1817 if (!cmd_info)
1818 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001819 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1820 rocker_port->pport))
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001821 return -EMSGSIZE;
1822 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1823 ETH_ALEN, macaddr))
1824 return -EMSGSIZE;
1825 rocker_tlv_nest_end(desc_info, cmd_info);
1826 return 0;
1827}
1828
Scott Feldman5111f802014-11-28 14:34:30 +01001829static int
Scott Feldman77a58c72015-07-08 16:06:47 -07001830rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1831 struct rocker_desc_info *desc_info,
1832 void *priv)
1833{
1834 int mtu = *(int *)priv;
1835 struct rocker_tlv *cmd_info;
1836
1837 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1838 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1839 return -EMSGSIZE;
1840 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1841 if (!cmd_info)
1842 return -EMSGSIZE;
1843 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1844 rocker_port->pport))
1845 return -EMSGSIZE;
1846 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1847 mtu))
1848 return -EMSGSIZE;
1849 rocker_tlv_nest_end(desc_info, cmd_info);
1850 return 0;
1851}
1852
1853static int
Simon Horman534ba6a2015-06-01 13:25:04 +09001854rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
Scott Feldman5111f802014-11-28 14:34:30 +01001855 struct rocker_desc_info *desc_info,
1856 void *priv)
1857{
1858 struct rocker_tlv *cmd_info;
1859
1860 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1861 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1862 return -EMSGSIZE;
1863 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1864 if (!cmd_info)
1865 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001866 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1867 rocker_port->pport))
Scott Feldman5111f802014-11-28 14:34:30 +01001868 return -EMSGSIZE;
1869 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1870 !!(rocker_port->brport_flags & BR_LEARNING)))
1871 return -EMSGSIZE;
1872 rocker_tlv_nest_end(desc_info, cmd_info);
1873 return 0;
1874}
1875
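/* Thin wrappers binding prepare/process callbacks to rocker_cmd_exec().
 * Plain ethtool/ndo entry points run outside of any switchdev
 * transaction, hence SWITCHDEV_TRANS_NONE and flags of 0.
 */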
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001876static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1877 struct ethtool_cmd *ecmd)
1878{
Scott Feldman179f9a22015-06-12 21:35:46 -07001879 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001880 rocker_cmd_get_port_settings_prep, NULL,
1881 rocker_cmd_get_port_settings_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001882 ecmd);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001883}
1884
1885static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1886 unsigned char *macaddr)
1887{
Scott Feldman179f9a22015-06-12 21:35:46 -07001888 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001889 rocker_cmd_get_port_settings_prep, NULL,
1890 rocker_cmd_get_port_settings_macaddr_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001891 macaddr);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001892}
1893
1894static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1895 struct ethtool_cmd *ecmd)
1896{
Scott Feldman179f9a22015-06-12 21:35:46 -07001897 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001898 rocker_cmd_set_port_settings_ethtool_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001899 ecmd, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001900}
1901
1902static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1903 unsigned char *macaddr)
1904{
Scott Feldman179f9a22015-06-12 21:35:46 -07001905 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001906 rocker_cmd_set_port_settings_macaddr_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001907 macaddr, NULL, NULL);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01001908}
1909
Scott Feldman77a58c72015-07-08 16:06:47 -07001910static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1911 int mtu)
1912{
1913 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
1914 rocker_cmd_set_port_settings_mtu_prep,
1915 &mtu, NULL, NULL);
1916}
1917
Scott Feldmanc4f20322015-05-10 09:47:50 -07001918static int rocker_port_set_learning(struct rocker_port *rocker_port,
1919 enum switchdev_trans trans)
Scott Feldman5111f802014-11-28 14:34:30 +01001920{
Scott Feldman179f9a22015-06-12 21:35:46 -07001921 return rocker_cmd_exec(rocker_port, trans, 0,
Scott Feldman5111f802014-11-28 14:34:30 +01001922 rocker_cmd_set_port_learning_prep,
Scott Feldmanc4f20322015-05-10 09:47:50 -07001923 NULL, NULL, NULL);
Scott Feldman5111f802014-11-28 14:34:30 +01001924}
1925
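/* Per-table flow key encoders.  Each OF-DPA table has its own TLV
 * layout; these helpers emit only the table-specific match/action
 * fields, while the common header (table id, priority, hardtime,
 * cookie) is emitted by rocker_cmd_flow_tbl_add() before dispatching
 * here.
 */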
Simon Hormane5054642015-05-25 14:28:36 +09001926static int
1927rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1928 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001929{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001930 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1931 entry->key.ig_port.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001932 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001933 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1934 entry->key.ig_port.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001935 return -EMSGSIZE;
1936 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1937 entry->key.ig_port.goto_tbl))
1938 return -EMSGSIZE;
1939
1940 return 0;
1941}
1942
Simon Hormane5054642015-05-25 14:28:36 +09001943static int
1944rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1945 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001946{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001947 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1948 entry->key.vlan.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001949 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001950 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1951 entry->key.vlan.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001952 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001953 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1954 entry->key.vlan.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001955 return -EMSGSIZE;
1956 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1957 entry->key.vlan.goto_tbl))
1958 return -EMSGSIZE;
1959 if (entry->key.vlan.untagged &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01001960 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1961 entry->key.vlan.new_vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001962 return -EMSGSIZE;
1963
1964 return 0;
1965}
1966
Simon Hormane5054642015-05-25 14:28:36 +09001967static int
1968rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1969 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001970{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001971 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1972 entry->key.term_mac.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001973 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08001974 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1975 entry->key.term_mac.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001976 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001977 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1978 entry->key.term_mac.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001979 return -EMSGSIZE;
1980 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1981 ETH_ALEN, entry->key.term_mac.eth_dst))
1982 return -EMSGSIZE;
1983 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1984 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1985 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001986 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1987 entry->key.term_mac.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001988 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01001989 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1990 entry->key.term_mac.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01001991 return -EMSGSIZE;
1992 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1993 entry->key.term_mac.goto_tbl))
1994 return -EMSGSIZE;
1995 if (entry->key.term_mac.copy_to_cpu &&
1996 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1997 entry->key.term_mac.copy_to_cpu))
1998 return -EMSGSIZE;
1999
2000 return 0;
2001}
2002
2003static int
2004rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002005 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002006{
Jiri Pirko9b03c712014-12-03 14:14:53 +01002007 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2008 entry->key.ucast_routing.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002009 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002010 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2011 entry->key.ucast_routing.dst4))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002012 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002013 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2014 entry->key.ucast_routing.dst4_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002015 return -EMSGSIZE;
2016 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2017 entry->key.ucast_routing.goto_tbl))
2018 return -EMSGSIZE;
2019 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2020 entry->key.ucast_routing.group_id))
2021 return -EMSGSIZE;
2022
2023 return 0;
2024}
2025
Simon Hormane5054642015-05-25 14:28:36 +09002026static int
2027rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2028 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002029{
2030 if (entry->key.bridge.has_eth_dst &&
2031 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2032 ETH_ALEN, entry->key.bridge.eth_dst))
2033 return -EMSGSIZE;
2034 if (entry->key.bridge.has_eth_dst_mask &&
2035 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2036 ETH_ALEN, entry->key.bridge.eth_dst_mask))
2037 return -EMSGSIZE;
2038 if (entry->key.bridge.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002039 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2040 entry->key.bridge.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002041 return -EMSGSIZE;
2042 if (entry->key.bridge.tunnel_id &&
2043 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2044 entry->key.bridge.tunnel_id))
2045 return -EMSGSIZE;
2046 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2047 entry->key.bridge.goto_tbl))
2048 return -EMSGSIZE;
2049 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2050 entry->key.bridge.group_id))
2051 return -EMSGSIZE;
2052 if (entry->key.bridge.copy_to_cpu &&
2053 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2054 entry->key.bridge.copy_to_cpu))
2055 return -EMSGSIZE;
2056
2057 return 0;
2058}
2059
Simon Hormane5054642015-05-25 14:28:36 +09002060static int
2061rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2062 const struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002063{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002064 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2065 entry->key.acl.in_pport))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002066 return -EMSGSIZE;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002067 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2068 entry->key.acl.in_pport_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002069 return -EMSGSIZE;
2070 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2071 ETH_ALEN, entry->key.acl.eth_src))
2072 return -EMSGSIZE;
2073 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2074 ETH_ALEN, entry->key.acl.eth_src_mask))
2075 return -EMSGSIZE;
2076 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2077 ETH_ALEN, entry->key.acl.eth_dst))
2078 return -EMSGSIZE;
2079 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2080 ETH_ALEN, entry->key.acl.eth_dst_mask))
2081 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002082 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2083 entry->key.acl.eth_type))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002084 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002085 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2086 entry->key.acl.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002087 return -EMSGSIZE;
Jiri Pirko9b03c712014-12-03 14:14:53 +01002088 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2089 entry->key.acl.vlan_id_mask))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002090 return -EMSGSIZE;
2091
2092 switch (ntohs(entry->key.acl.eth_type)) {
2093 case ETH_P_IP:
2094 case ETH_P_IPV6:
2095 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2096 entry->key.acl.ip_proto))
2097 return -EMSGSIZE;
2098 if (rocker_tlv_put_u8(desc_info,
2099 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2100 entry->key.acl.ip_proto_mask))
2101 return -EMSGSIZE;
2102 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2103 entry->key.acl.ip_tos & 0x3f))
2104 return -EMSGSIZE;
2105 if (rocker_tlv_put_u8(desc_info,
2106 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2107 entry->key.acl.ip_tos_mask & 0x3f))
2108 return -EMSGSIZE;
2109 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2110 (entry->key.acl.ip_tos & 0xc0) >> 6))
2111 return -EMSGSIZE;
2112 if (rocker_tlv_put_u8(desc_info,
2113 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2114 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
2115 return -EMSGSIZE;
2116 break;
2117 }
2118
2119 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2120 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2121 entry->key.acl.group_id))
2122 return -EMSGSIZE;
2123
2124 return 0;
2125}
2126
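/* Note the ip_tos handling above: the low six bits of the stored tos
 * value are sent as the DSCP TLV and the top two bits, shifted down,
 * as the ECN TLV, with the mask fields treated the same way.
 */
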
Simon Horman534ba6a2015-06-01 13:25:04 +09002127static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002128 struct rocker_desc_info *desc_info,
2129 void *priv)
2130{
Simon Hormane5054642015-05-25 14:28:36 +09002131 const struct rocker_flow_tbl_entry *entry = priv;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002132 struct rocker_tlv *cmd_info;
2133 int err = 0;
2134
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002135 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002136 return -EMSGSIZE;
2137 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2138 if (!cmd_info)
2139 return -EMSGSIZE;
2140 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2141 entry->key.tbl_id))
2142 return -EMSGSIZE;
2143 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2144 entry->key.priority))
2145 return -EMSGSIZE;
2146 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2147 return -EMSGSIZE;
2148 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2149 entry->cookie))
2150 return -EMSGSIZE;
2151
2152 switch (entry->key.tbl_id) {
2153 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2154 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2155 break;
2156 case ROCKER_OF_DPA_TABLE_ID_VLAN:
2157 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2158 break;
2159 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2160 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2161 break;
2162 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2163 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2164 break;
2165 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2166 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2167 break;
2168 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2169 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2170 break;
2171 default:
2172 err = -ENOTSUPP;
2173 break;
2174 }
2175
2176 if (err)
2177 return err;
2178
2179 rocker_tlv_nest_end(desc_info, cmd_info);
2180
2181 return 0;
2182}
2183
Simon Horman534ba6a2015-06-01 13:25:04 +09002184static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002185 struct rocker_desc_info *desc_info,
2186 void *priv)
2187{
2188 const struct rocker_flow_tbl_entry *entry = priv;
2189 struct rocker_tlv *cmd_info;
2190
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002191 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002192 return -EMSGSIZE;
2193 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2194 if (!cmd_info)
2195 return -EMSGSIZE;
2196 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2197 entry->cookie))
2198 return -EMSGSIZE;
2199 rocker_tlv_nest_end(desc_info, cmd_info);
2200
2201 return 0;
2202}
2203
2204static int
2205rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2206 struct rocker_group_tbl_entry *entry)
2207{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002208 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002209 ROCKER_GROUP_PORT_GET(entry->group_id)))
2210 return -EMSGSIZE;
2211 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2212 entry->l2_interface.pop_vlan))
2213 return -EMSGSIZE;
2214
2215 return 0;
2216}
2217
2218static int
2219rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002220 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002221{
2222 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2223 entry->l2_rewrite.group_id))
2224 return -EMSGSIZE;
2225 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2226 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2227 ETH_ALEN, entry->l2_rewrite.eth_src))
2228 return -EMSGSIZE;
2229 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2230 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2231 ETH_ALEN, entry->l2_rewrite.eth_dst))
2232 return -EMSGSIZE;
2233 if (entry->l2_rewrite.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002234 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2235 entry->l2_rewrite.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002236 return -EMSGSIZE;
2237
2238 return 0;
2239}
2240
2241static int
2242rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002243 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002244{
2245 int i;
2246 struct rocker_tlv *group_ids;
2247
2248 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2249 entry->group_count))
2250 return -EMSGSIZE;
2251
2252 group_ids = rocker_tlv_nest_start(desc_info,
2253 ROCKER_TLV_OF_DPA_GROUP_IDS);
2254 if (!group_ids)
2255 return -EMSGSIZE;
2256
2257 for (i = 0; i < entry->group_count; i++)
2258 /* Note TLV array is 1-based */
2259 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2260 return -EMSGSIZE;
2261
2262 rocker_tlv_nest_end(desc_info, group_ids);
2263
2264 return 0;
2265}
2266
2267static int
2268rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
Simon Hormane5054642015-05-25 14:28:36 +09002269 const struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002270{
2271 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2272 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2273 ETH_ALEN, entry->l3_unicast.eth_src))
2274 return -EMSGSIZE;
2275 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2276 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2277 ETH_ALEN, entry->l3_unicast.eth_dst))
2278 return -EMSGSIZE;
2279 if (entry->l3_unicast.vlan_id &&
Jiri Pirko9b03c712014-12-03 14:14:53 +01002280 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2281 entry->l3_unicast.vlan_id))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002282 return -EMSGSIZE;
2283 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2284 entry->l3_unicast.ttl_check))
2285 return -EMSGSIZE;
2286 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2287 entry->l3_unicast.group_id))
2288 return -EMSGSIZE;
2289
2290 return 0;
2291}
2292
Simon Horman534ba6a2015-06-01 13:25:04 +09002293static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002294 struct rocker_desc_info *desc_info,
2295 void *priv)
2296{
2297 struct rocker_group_tbl_entry *entry = priv;
2298 struct rocker_tlv *cmd_info;
2299 int err = 0;
2300
2301 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2302 return -EMSGSIZE;
2303 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2304 if (!cmd_info)
2305 return -EMSGSIZE;
2306
2307 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2308 entry->group_id))
2309 return -EMSGSIZE;
2310
2311 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2312 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2313 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2314 break;
2315 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2316 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2317 break;
2318 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2319 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2320 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2321 break;
2322 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2323 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2324 break;
2325 default:
2326 err = -ENOTSUPP;
2327 break;
2328 }
2329
2330 if (err)
2331 return err;
2332
2333 rocker_tlv_nest_end(desc_info, cmd_info);
2334
2335 return 0;
2336}
2337
Simon Horman534ba6a2015-06-01 13:25:04 +09002338static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002339 struct rocker_desc_info *desc_info,
2340 void *priv)
2341{
2342 const struct rocker_group_tbl_entry *entry = priv;
2343 struct rocker_tlv *cmd_info;
2344
2345 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2346 return -EMSGSIZE;
2347 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2348 if (!cmd_info)
2349 return -EMSGSIZE;
2350 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2351 entry->group_id))
2352 return -EMSGSIZE;
2353 rocker_tlv_nest_end(desc_info, cmd_info);
2354
2355 return 0;
2356}
2357
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002358/***************************************************
2359 * Flow, group, FDB, internal VLAN and neigh tables
2360 ***************************************************/
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002361
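/* The driver keeps a software shadow of everything it has programmed
 * into the device: flow, group, FDB, internal VLAN and neigh entries
 * each live in their own hashtable, guarded by a dedicated spinlock
 * taken with irqsave since some updates arrive from IRQ context.
 */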
2362static int rocker_init_tbls(struct rocker *rocker)
2363{
2364 hash_init(rocker->flow_tbl);
2365 spin_lock_init(&rocker->flow_tbl_lock);
2366
2367 hash_init(rocker->group_tbl);
2368 spin_lock_init(&rocker->group_tbl_lock);
2369
2370 hash_init(rocker->fdb_tbl);
2371 spin_lock_init(&rocker->fdb_tbl_lock);
2372
2373 hash_init(rocker->internal_vlan_tbl);
2374 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2375
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002376 hash_init(rocker->neigh_tbl);
2377 spin_lock_init(&rocker->neigh_tbl_lock);
2378
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002379 return 0;
2380}
2381
2382static void rocker_free_tbls(struct rocker *rocker)
2383{
2384 unsigned long flags;
2385 struct rocker_flow_tbl_entry *flow_entry;
2386 struct rocker_group_tbl_entry *group_entry;
2387 struct rocker_fdb_tbl_entry *fdb_entry;
2388 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002389 struct rocker_neigh_tbl_entry *neigh_entry;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002390 struct hlist_node *tmp;
2391 int bkt;
2392
2393 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2394 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2395 hash_del(&flow_entry->entry);
2396 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2397
2398 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2399 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2400 hash_del(&group_entry->entry);
2401 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2402
2403 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2404 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2405 hash_del(&fdb_entry->entry);
2406 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2407
2408 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2409 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2410 tmp, internal_vlan_entry, entry)
2411 hash_del(&internal_vlan_entry->entry);
2412 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002413
2414 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2415 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2416 hash_del(&neigh_entry->entry);
2417 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002418}
2419
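/* Flow entries are hashed by a CRC32 over key_len bytes of the key.
 * key_len is normally the full key size, but an entry may shorten it
 * (see rocker_flow_tbl_ucast4_routing()) so that only the match fields
 * participate in lookup and comparison.
 */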
2420static struct rocker_flow_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002421rocker_flow_tbl_find(const struct rocker *rocker,
2422 const struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002423{
2424 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002425 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002426
2427 hash_for_each_possible(rocker->flow_tbl, found,
2428 entry, match->key_crc32) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002429 if (memcmp(&found->key, &match->key, key_len) == 0)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002430 return found;
2431 }
2432
2433 return NULL;
2434}
2435
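/* Adding a flow that already exists in the shadow table reuses the old
 * entry's cookie and is sent to the device as OF_DPA_FLOW_MOD;
 * otherwise the next cookie is allocated and an OF_DPA_FLOW_ADD is
 * sent.  During SWITCHDEV_TRANS_PREPARE the hashtable is left untouched
 * so the commit phase sees the same state.
 */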
2436static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07002437 enum switchdev_trans trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002438 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002439{
2440 struct rocker *rocker = rocker_port->rocker;
2441 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002442 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002443 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002444
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002445 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002446
Scott Feldman179f9a22015-06-12 21:35:46 -07002447 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002448
2449 found = rocker_flow_tbl_find(rocker, match);
2450
2451 if (found) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002452 match->cookie = found->cookie;
Scott Feldmanc4f20322015-05-10 09:47:50 -07002453 if (trans != SWITCHDEV_TRANS_PREPARE)
2454 hash_del(&found->entry);
Simon Horman0985df72015-05-25 14:28:35 +09002455 rocker_port_kfree(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002456 found = match;
2457 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002458 } else {
2459 found = match;
2460 found->cookie = rocker->flow_tbl_next_cookie++;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002461 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002462 }
2463
Scott Feldmanc4f20322015-05-10 09:47:50 -07002464 if (trans != SWITCHDEV_TRANS_PREPARE)
2465 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002466
Scott Feldman179f9a22015-06-12 21:35:46 -07002467 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002468
Scott Feldman179f9a22015-06-12 21:35:46 -07002469 return rocker_cmd_exec(rocker_port, trans, flags,
2470 rocker_cmd_flow_tbl_add, found, NULL, NULL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002471}
2472
2473static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07002474 enum switchdev_trans trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002475 struct rocker_flow_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002476{
2477 struct rocker *rocker = rocker_port->rocker;
2478 struct rocker_flow_tbl_entry *found;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002479 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
Scott Feldman179f9a22015-06-12 21:35:46 -07002480 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002481 int err = 0;
2482
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002483 match->key_crc32 = crc32(~0, &match->key, key_len);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002484
Scott Feldman179f9a22015-06-12 21:35:46 -07002485 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002486
2487 found = rocker_flow_tbl_find(rocker, match);
2488
2489 if (found) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07002490 if (trans != SWITCHDEV_TRANS_PREPARE)
2491 hash_del(&found->entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002492 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002493 }
2494
Scott Feldman179f9a22015-06-12 21:35:46 -07002495 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002496
Simon Horman0985df72015-05-25 14:28:35 +09002497 rocker_port_kfree(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002498
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002499 if (found) {
Scott Feldman179f9a22015-06-12 21:35:46 -07002500 err = rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002501 rocker_cmd_flow_tbl_del,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002502 found, NULL, NULL);
Simon Horman0985df72015-05-25 14:28:35 +09002503 rocker_port_kfree(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002504 }
2505
2506 return err;
2507}
2508
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002509static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002510 enum switchdev_trans trans, int flags,
2511 struct rocker_flow_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002512{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002513 if (flags & ROCKER_OP_FLAG_REMOVE)
Scott Feldman179f9a22015-06-12 21:35:46 -07002514 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002515 else
Scott Feldman179f9a22015-06-12 21:35:46 -07002516 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002517}
2518
2519static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002520 enum switchdev_trans trans, int flags,
2521 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002522 enum rocker_of_dpa_table_id goto_tbl)
2523{
2524 struct rocker_flow_tbl_entry *entry;
2525
Scott Feldman179f9a22015-06-12 21:35:46 -07002526 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002527 if (!entry)
2528 return -ENOMEM;
2529
2530 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2531 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002532 entry->key.ig_port.in_pport = in_pport;
2533 entry->key.ig_port.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002534 entry->key.ig_port.goto_tbl = goto_tbl;
2535
Scott Feldmanc4f20322015-05-10 09:47:50 -07002536 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002537}
2538
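/* For illustration only (the real call site lives elsewhere in this
 * file): installing the catch-all ingress rule that steers a physical
 * port's traffic to the VLAN table would look roughly like
 *
 *	err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
 *				      rocker_port->pport, 0xffff0000,
 *				      ROCKER_OF_DPA_TABLE_ID_VLAN);
 *
 * where the 0xffff0000 pport mask is a sketch value, not taken from
 * this listing.
 */
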
2539static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002540 enum switchdev_trans trans, int flags,
2541 u32 in_pport, __be16 vlan_id,
2542 __be16 vlan_id_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002543 enum rocker_of_dpa_table_id goto_tbl,
2544 bool untagged, __be16 new_vlan_id)
2545{
2546 struct rocker_flow_tbl_entry *entry;
2547
Scott Feldman179f9a22015-06-12 21:35:46 -07002548 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002549 if (!entry)
2550 return -ENOMEM;
2551
2552 entry->key.priority = ROCKER_PRIORITY_VLAN;
2553 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002554 entry->key.vlan.in_pport = in_pport;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002555 entry->key.vlan.vlan_id = vlan_id;
2556 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2557 entry->key.vlan.goto_tbl = goto_tbl;
2558
2559 entry->key.vlan.untagged = untagged;
2560 entry->key.vlan.new_vlan_id = new_vlan_id;
2561
Scott Feldmanc4f20322015-05-10 09:47:50 -07002562 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002563}
2564
2565static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002566 enum switchdev_trans trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002567 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002568 __be16 eth_type, const u8 *eth_dst,
2569 const u8 *eth_dst_mask, __be16 vlan_id,
2570 __be16 vlan_id_mask, bool copy_to_cpu,
2571 int flags)
2572{
2573 struct rocker_flow_tbl_entry *entry;
2574
Scott Feldman179f9a22015-06-12 21:35:46 -07002575 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002576 if (!entry)
2577 return -ENOMEM;
2578
2579 if (is_multicast_ether_addr(eth_dst)) {
2580 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2581 entry->key.term_mac.goto_tbl =
2582 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2583 } else {
2584 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2585 entry->key.term_mac.goto_tbl =
2586 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2587 }
2588
2589 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002590 entry->key.term_mac.in_pport = in_pport;
2591 entry->key.term_mac.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002592 entry->key.term_mac.eth_type = eth_type;
2593 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2594 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2595 entry->key.term_mac.vlan_id = vlan_id;
2596 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2597 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2598
Scott Feldmanc4f20322015-05-10 09:47:50 -07002599 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002600}
2601
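/* Bridging flows pick their priority from three properties: whether the
 * flow is VLAN-scoped (vlan_id set) or tenant/tunnel-scoped, whether it
 * is a default entry (no eth_dst, or eth_dst with a mask) and whether
 * that mask is non-exact ("wild").  The if/else ladder below maps each
 * combination onto its ROCKER_PRIORITY_BRIDGING_* value.
 */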
2602static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002603 enum switchdev_trans trans, int flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002604 const u8 *eth_dst, const u8 *eth_dst_mask,
2605 __be16 vlan_id, u32 tunnel_id,
2606 enum rocker_of_dpa_table_id goto_tbl,
2607 u32 group_id, bool copy_to_cpu)
2608{
2609 struct rocker_flow_tbl_entry *entry;
2610 u32 priority;
2611 bool vlan_bridging = !!vlan_id;
2612	bool dflt = !eth_dst || eth_dst_mask;
2613 bool wild = false;
2614
Scott Feldman179f9a22015-06-12 21:35:46 -07002615 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002616 if (!entry)
2617 return -ENOMEM;
2618
2619 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2620
2621 if (eth_dst) {
2622 entry->key.bridge.has_eth_dst = 1;
2623 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2624 }
2625 if (eth_dst_mask) {
2626 entry->key.bridge.has_eth_dst_mask = 1;
2627 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
Simon Horman629161f2015-04-30 15:21:29 +09002628 if (!ether_addr_equal(eth_dst_mask, ff_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002629 wild = true;
2630 }
2631
2632 priority = ROCKER_PRIORITY_UNKNOWN;
Thomas Graf51ace882014-11-28 14:34:32 +01002633 if (vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002634 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002635 else if (vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002636 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002637 else if (vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002638 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
Thomas Graf51ace882014-11-28 14:34:32 +01002639 else if (!vlan_bridging && dflt && wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002640 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
Thomas Graf51ace882014-11-28 14:34:32 +01002641 else if (!vlan_bridging && dflt && !wild)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002642 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
Thomas Graf51ace882014-11-28 14:34:32 +01002643 else if (!vlan_bridging && !dflt)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002644 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2645
2646 entry->key.priority = priority;
2647 entry->key.bridge.vlan_id = vlan_id;
2648 entry->key.bridge.tunnel_id = tunnel_id;
2649 entry->key.bridge.goto_tbl = goto_tbl;
2650 entry->key.bridge.group_id = group_id;
2651 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2652
Scott Feldmanc4f20322015-05-10 09:47:50 -07002653 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002654}
2655
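/* Unicast route entries truncate key_len to exclude the group_id
 * action, so a route to the same prefix with a new nexthop group hits
 * the existing shadow entry and goes out as a flow MOD instead of a
 * duplicate ADD.
 */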
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002656static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002657 enum switchdev_trans trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002658 __be16 eth_type, __be32 dst,
2659 __be32 dst_mask, u32 priority,
2660 enum rocker_of_dpa_table_id goto_tbl,
2661 u32 group_id, int flags)
2662{
2663 struct rocker_flow_tbl_entry *entry;
2664
Scott Feldman179f9a22015-06-12 21:35:46 -07002665 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002666 if (!entry)
2667 return -ENOMEM;
2668
2669 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2670 entry->key.priority = priority;
2671 entry->key.ucast_routing.eth_type = eth_type;
2672 entry->key.ucast_routing.dst4 = dst;
2673 entry->key.ucast_routing.dst4_mask = dst_mask;
2674 entry->key.ucast_routing.goto_tbl = goto_tbl;
2675 entry->key.ucast_routing.group_id = group_id;
2676 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2677 ucast_routing.group_id);
2678
Scott Feldmanc4f20322015-05-10 09:47:50 -07002679 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002680}
2681
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002682static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002683 enum switchdev_trans trans, int flags,
2684 u32 in_pport, u32 in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002685 const u8 *eth_src, const u8 *eth_src_mask,
2686 const u8 *eth_dst, const u8 *eth_dst_mask,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002687 __be16 eth_type, __be16 vlan_id,
2688 __be16 vlan_id_mask, u8 ip_proto,
2689 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002690 u32 group_id)
2691{
2692 u32 priority;
2693 struct rocker_flow_tbl_entry *entry;
2694
Scott Feldman179f9a22015-06-12 21:35:46 -07002695 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002696 if (!entry)
2697 return -ENOMEM;
2698
2699 priority = ROCKER_PRIORITY_ACL_NORMAL;
2700 if (eth_dst && eth_dst_mask) {
Simon Horman629161f2015-04-30 15:21:29 +09002701 if (ether_addr_equal(eth_dst_mask, mcast_mac))
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002702 priority = ROCKER_PRIORITY_ACL_DFLT;
2703 else if (is_link_local_ether_addr(eth_dst))
2704 priority = ROCKER_PRIORITY_ACL_CTRL;
2705 }
2706
2707 entry->key.priority = priority;
2708 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002709 entry->key.acl.in_pport = in_pport;
2710 entry->key.acl.in_pport_mask = in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002711
2712 if (eth_src)
2713 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2714 if (eth_src_mask)
2715 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2716 if (eth_dst)
2717 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2718 if (eth_dst_mask)
2719 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2720
2721 entry->key.acl.eth_type = eth_type;
2722 entry->key.acl.vlan_id = vlan_id;
2723 entry->key.acl.vlan_id_mask = vlan_id_mask;
2724 entry->key.acl.ip_proto = ip_proto;
2725 entry->key.acl.ip_proto_mask = ip_proto_mask;
2726 entry->key.acl.ip_tos = ip_tos;
2727 entry->key.acl.ip_tos_mask = ip_tos_mask;
2728 entry->key.acl.group_id = group_id;
2729
Scott Feldmanc4f20322015-05-10 09:47:50 -07002730 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002731}
2732
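/* Group table entries need no CRC: the 32-bit group_id already packs
 * the group type plus its distinguishing fields (e.g. VLAN and pport
 * for L2 interface groups) and doubles as the hash key.
 */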
2733static struct rocker_group_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002734rocker_group_tbl_find(const struct rocker *rocker,
2735 const struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002736{
2737 struct rocker_group_tbl_entry *found;
2738
2739 hash_for_each_possible(rocker->group_tbl, found,
2740 entry, match->group_id) {
2741 if (found->group_id == match->group_id)
2742 return found;
2743 }
2744
2745 return NULL;
2746}
2747
Simon Horman0985df72015-05-25 14:28:35 +09002748static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002749 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002750{
2751 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2752 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2753 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
Simon Horman0985df72015-05-25 14:28:35 +09002754 rocker_port_kfree(trans, entry->group_ids);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002755 break;
2756 default:
2757 break;
2758 }
Simon Horman0985df72015-05-25 14:28:35 +09002759 rocker_port_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002760}
2761
2762static int rocker_group_tbl_add(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07002763 enum switchdev_trans trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002764 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002765{
2766 struct rocker *rocker = rocker_port->rocker;
2767 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002768 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002769
Scott Feldman179f9a22015-06-12 21:35:46 -07002770 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002771
2772 found = rocker_group_tbl_find(rocker, match);
2773
2774 if (found) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07002775 if (trans != SWITCHDEV_TRANS_PREPARE)
2776 hash_del(&found->entry);
Simon Horman0985df72015-05-25 14:28:35 +09002777 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002778 found = match;
2779 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2780 } else {
2781 found = match;
2782 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2783 }
2784
Scott Feldmanc4f20322015-05-10 09:47:50 -07002785 if (trans != SWITCHDEV_TRANS_PREPARE)
2786 hash_add(rocker->group_tbl, &found->entry, found->group_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002787
Scott Feldman179f9a22015-06-12 21:35:46 -07002788 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002789
Scott Feldman179f9a22015-06-12 21:35:46 -07002790 return rocker_cmd_exec(rocker_port, trans, flags,
2791 rocker_cmd_group_tbl_add, found, NULL, NULL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002792}
2793
2794static int rocker_group_tbl_del(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07002795 enum switchdev_trans trans, int flags,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002796 struct rocker_group_tbl_entry *match)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002797{
2798 struct rocker *rocker = rocker_port->rocker;
2799 struct rocker_group_tbl_entry *found;
Scott Feldman179f9a22015-06-12 21:35:46 -07002800 unsigned long lock_flags;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002801 int err = 0;
2802
Scott Feldman179f9a22015-06-12 21:35:46 -07002803 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002804
2805 found = rocker_group_tbl_find(rocker, match);
2806
2807 if (found) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07002808 if (trans != SWITCHDEV_TRANS_PREPARE)
2809 hash_del(&found->entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002810 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2811 }
2812
Scott Feldman179f9a22015-06-12 21:35:46 -07002813 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002814
Simon Horman0985df72015-05-25 14:28:35 +09002815 rocker_group_tbl_entry_free(trans, match);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002816
2817 if (found) {
Scott Feldman179f9a22015-06-12 21:35:46 -07002818 err = rocker_cmd_exec(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002819 rocker_cmd_group_tbl_del,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002820 found, NULL, NULL);
Simon Horman0985df72015-05-25 14:28:35 +09002821 rocker_group_tbl_entry_free(trans, found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002822 }
2823
2824 return err;
2825}
2826
2827static int rocker_group_tbl_do(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002828 enum switchdev_trans trans, int flags,
2829 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002830{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002831 if (flags & ROCKER_OP_FLAG_REMOVE)
Scott Feldman179f9a22015-06-12 21:35:46 -07002832 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002833 else
Scott Feldman179f9a22015-06-12 21:35:46 -07002834 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002835}
2836
2837static int rocker_group_l2_interface(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002838 enum switchdev_trans trans, int flags,
2839 __be16 vlan_id, u32 out_pport,
2840 int pop_vlan)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002841{
2842 struct rocker_group_tbl_entry *entry;
2843
Scott Feldman179f9a22015-06-12 21:35:46 -07002844 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002845 if (!entry)
2846 return -ENOMEM;
2847
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08002848 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002849 entry->l2_interface.pop_vlan = pop_vlan;
2850
Scott Feldmanc4f20322015-05-10 09:47:50 -07002851 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002852}
2853
2854static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002855 enum switchdev_trans trans,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002856 int flags, u8 group_count,
Simon Hormane5054642015-05-25 14:28:36 +09002857 const u32 *group_ids, u32 group_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002858{
2859 struct rocker_group_tbl_entry *entry;
2860
Scott Feldman179f9a22015-06-12 21:35:46 -07002861 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002862 if (!entry)
2863 return -ENOMEM;
2864
2865 entry->group_id = group_id;
2866 entry->group_count = group_count;
2867
Scott Feldman179f9a22015-06-12 21:35:46 -07002868 entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
2869 group_count, sizeof(u32));
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002870 if (!entry->group_ids) {
Simon Horman0985df72015-05-25 14:28:35 +09002871 rocker_port_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002872 return -ENOMEM;
2873 }
2874 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2875
Scott Feldmanc4f20322015-05-10 09:47:50 -07002876 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002877}
2878
2879static int rocker_group_l2_flood(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002880 enum switchdev_trans trans, int flags,
2881 __be16 vlan_id, u8 group_count,
Simon Hormane5054642015-05-25 14:28:36 +09002882 const u32 *group_ids, u32 group_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002883{
Scott Feldmanc4f20322015-05-10 09:47:50 -07002884 return rocker_group_l2_fan_out(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002885 group_count, group_ids,
2886 group_id);
2887}
2888
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002889static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002890 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09002891 u32 index, const u8 *src_mac, const u8 *dst_mac,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002892 __be16 vlan_id, bool ttl_check, u32 pport)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002893{
2894 struct rocker_group_tbl_entry *entry;
2895
Scott Feldman179f9a22015-06-12 21:35:46 -07002896 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002897 if (!entry)
2898 return -ENOMEM;
2899
2900 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2901 if (src_mac)
2902 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2903 if (dst_mac)
2904 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2905 entry->l3_unicast.vlan_id = vlan_id;
2906 entry->l3_unicast.ttl_check = ttl_check;
2907 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2908
Scott Feldmanc4f20322015-05-10 09:47:50 -07002909 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002910}
2911
2912static struct rocker_neigh_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09002913rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002914{
2915 struct rocker_neigh_tbl_entry *found;
2916
Scott Feldman0f43deb2015-03-06 15:54:51 -08002917 hash_for_each_possible(rocker->neigh_tbl, found,
2918 entry, be32_to_cpu(ip_addr))
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002919 if (found->ip_addr == ip_addr)
2920 return found;
2921
2922 return NULL;
2923}
2924
2925static void _rocker_neigh_add(struct rocker *rocker,
Simon Horman550ecc92015-05-21 12:40:16 +09002926 enum switchdev_trans trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002927 struct rocker_neigh_tbl_entry *entry)
2928{
Scott Feldman4d81db42015-06-12 21:24:40 -07002929 if (trans != SWITCHDEV_TRANS_COMMIT)
2930 entry->index = rocker->neigh_tbl_next_index++;
Simon Horman550ecc92015-05-21 12:40:16 +09002931 if (trans == SWITCHDEV_TRANS_PREPARE)
2932 return;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002933 entry->ref_count++;
Scott Feldman0f43deb2015-03-06 15:54:51 -08002934 hash_add(rocker->neigh_tbl, &entry->entry,
2935 be32_to_cpu(entry->ip_addr));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002936}
2937
Simon Horman0985df72015-05-25 14:28:35 +09002938static void _rocker_neigh_del(enum switchdev_trans trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002939 struct rocker_neigh_tbl_entry *entry)
2940{
Simon Horman550ecc92015-05-21 12:40:16 +09002941 if (trans == SWITCHDEV_TRANS_PREPARE)
2942 return;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002943 if (--entry->ref_count == 0) {
2944 hash_del(&entry->entry);
Simon Horman0985df72015-05-25 14:28:35 +09002945 rocker_port_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002946 }
2947}
2948
Scott Feldmanc4f20322015-05-10 09:47:50 -07002949static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
Simon Horman550ecc92015-05-21 12:40:16 +09002950 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09002951 const u8 *eth_dst, bool ttl_check)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002952{
2953 if (eth_dst) {
2954 ether_addr_copy(entry->eth_dst, eth_dst);
2955 entry->ttl_check = ttl_check;
Simon Horman550ecc92015-05-21 12:40:16 +09002956 } else if (trans != SWITCHDEV_TRANS_PREPARE) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002957 entry->ref_count++;
2958 }
2959}
2960
2961static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002962 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09002963 int flags, __be32 ip_addr, const u8 *eth_dst)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002964{
2965 struct rocker *rocker = rocker_port->rocker;
2966 struct rocker_neigh_tbl_entry *entry;
2967 struct rocker_neigh_tbl_entry *found;
2968 unsigned long lock_flags;
2969 __be16 eth_type = htons(ETH_P_IP);
2970 enum rocker_of_dpa_table_id goto_tbl =
2971 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2972 u32 group_id;
2973 u32 priority = 0;
2974 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2975 bool updating;
2976 bool removing;
2977 int err = 0;
2978
Scott Feldman179f9a22015-06-12 21:35:46 -07002979 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002980 if (!entry)
2981 return -ENOMEM;
2982
2983 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2984
2985 found = rocker_neigh_tbl_find(rocker, ip_addr);
2986
2987 updating = found && adding;
2988 removing = found && !adding;
2989 adding = !found && adding;
2990
2991 if (adding) {
2992 entry->ip_addr = ip_addr;
2993 entry->dev = rocker_port->dev;
2994 ether_addr_copy(entry->eth_dst, eth_dst);
2995 entry->ttl_check = true;
Simon Horman550ecc92015-05-21 12:40:16 +09002996 _rocker_neigh_add(rocker, trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08002997 } else if (removing) {
2998 memcpy(entry, found, sizeof(*entry));
Simon Horman0985df72015-05-25 14:28:35 +09002999 _rocker_neigh_del(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003000 } else if (updating) {
Simon Horman550ecc92015-05-21 12:40:16 +09003001 _rocker_neigh_update(found, trans, eth_dst, true);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003002 memcpy(entry, found, sizeof(*entry));
3003 } else {
3004 err = -ENOENT;
3005 }
3006
3007 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3008
3009 if (err)
3010 goto err_out;
3011
3012 /* For each active neighbor, we have an L3 unicast group and
3013 * a /32 route to the neighbor, which uses the L3 unicast
3014 * group. The L3 unicast group can also be referred to by
3015 * other routes' nexthops.
3016 */
3017
Scott Feldmanc4f20322015-05-10 09:47:50 -07003018 err = rocker_group_l3_unicast(rocker_port, trans, flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003019 entry->index,
3020 rocker_port->dev->dev_addr,
3021 entry->eth_dst,
3022 rocker_port->internal_vlan_id,
3023 entry->ttl_check,
3024 rocker_port->pport);
3025 if (err) {
3026 netdev_err(rocker_port->dev,
3027 "Error (%d) L3 unicast group index %d\n",
3028 err, entry->index);
3029 goto err_out;
3030 }
3031
3032 if (adding || removing) {
3033 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003034 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003035 eth_type, ip_addr,
3036 inet_make_mask(32),
3037 priority, goto_tbl,
3038 group_id, flags);
3039
3040 if (err)
3041 netdev_err(rocker_port->dev,
3042 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3043 err, &entry->ip_addr, group_id);
3044 }
3045
3046err_out:
3047 if (!adding)
Simon Horman0985df72015-05-25 14:28:35 +09003048 rocker_port_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003049
3050 return err;
3051}
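/* Aside (illustration, not driver code): the adding/updating/removing
 * classification above folds the caller's intent and the lookup result
 * into exactly one action, with the leftover case (remove a missing
 * entry) mapping to -ENOENT.  The truth table, printed by a sketch:
 */
#if 0	/* standalone sketch, kept out of the build */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		bool found = i & 1;
		bool add_req = i & 2;	/* !(flags & ROCKER_OP_FLAG_REMOVE) */
		bool updating = found && add_req;
		bool removing = found && !add_req;
		bool adding = !found && add_req;

		printf("found=%d add_req=%d -> add=%d update=%d remove=%d enoent=%d\n",
		       found, add_req, adding, updating, removing,
		       !adding && !updating && !removing);
	}
	return 0;
}
#endif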
3052
3053static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003054 enum switchdev_trans trans, __be32 ip_addr)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003055{
3056 struct net_device *dev = rocker_port->dev;
Scott Feldman0f43deb2015-03-06 15:54:51 -08003057 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003058 int err = 0;
3059
Ying Xue4133fc02015-05-15 12:53:21 +08003060 if (!n) {
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003061 n = neigh_create(&arp_tbl, &ip_addr, dev);
Ying Xue4133fc02015-05-15 12:53:21 +08003062		if (IS_ERR(n))
3063			return PTR_ERR(n);
3064 }
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003065
3066 /* If the neigh is already resolved, then go ahead and
3067 * install the entry, otherwise start the ARP process to
3068 * resolve the neigh.
3069 */
3070
3071 if (n->nud_state & NUD_VALID)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003072 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
3073 ip_addr, n->ha);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003074 else
3075 neigh_event_send(n, NULL);
3076
Ying Xue4133fc02015-05-15 12:53:21 +08003077 neigh_release(n);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003078 return err;
3079}
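/* Aside (illustration, not driver code): the resolve path above is a
 * "use it if cached, otherwise kick the resolver and return" pattern --
 * install the entry right away when the neigh is NUD_VALID, else send
 * an ARP probe and rely on the netevent path to install later.  A
 * miniature analogue with invented callbacks:
 */
#if 0	/* standalone sketch, kept out of the build */
#include <stdio.h>
#include <stdbool.h>

struct neigh { bool valid; unsigned char mac[6]; };

static int install(const unsigned char *mac)
{
	printf("install %02x:...\n", mac[0]);
	return 0;
}

static void kick(struct neigh *n)
{
	(void)n;
	puts("ARP request sent; install happens on completion");
}

static int resolve_or_kick(struct neigh *n)
{
	if (n->valid)
		return install(n->mac);
	kick(n);
	return 0;
}

int main(void)
{
	struct neigh unresolved = { .valid = false };
	struct neigh resolved = { .valid = true,
				  .mac = { 0x52, 0x54, 0, 0, 0, 1 } };

	resolve_or_kick(&unresolved);
	resolve_or_kick(&resolved);
	return 0;
}
#endif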
3080
Scott Feldmanc4f20322015-05-10 09:47:50 -07003081static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
3082 enum switchdev_trans trans, int flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003083 __be32 ip_addr, u32 *index)
3084{
3085 struct rocker *rocker = rocker_port->rocker;
3086 struct rocker_neigh_tbl_entry *entry;
3087 struct rocker_neigh_tbl_entry *found;
3088 unsigned long lock_flags;
3089 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3090 bool updating;
3091 bool removing;
3092 bool resolved = true;
3093 int err = 0;
3094
Scott Feldman179f9a22015-06-12 21:35:46 -07003095 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003096 if (!entry)
3097 return -ENOMEM;
3098
3099 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3100
3101 found = rocker_neigh_tbl_find(rocker, ip_addr);
3102 if (found)
3103 *index = found->index;
3104
3105 updating = found && adding;
3106 removing = found && !adding;
3107 adding = !found && adding;
3108
3109 if (adding) {
3110 entry->ip_addr = ip_addr;
3111 entry->dev = rocker_port->dev;
Simon Horman550ecc92015-05-21 12:40:16 +09003112 _rocker_neigh_add(rocker, trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003113 *index = entry->index;
3114 resolved = false;
3115 } else if (removing) {
Simon Horman0985df72015-05-25 14:28:35 +09003116 _rocker_neigh_del(trans, found);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003117 } else if (updating) {
Simon Horman550ecc92015-05-21 12:40:16 +09003118 _rocker_neigh_update(found, trans, NULL, false);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003119 resolved = !is_zero_ether_addr(found->eth_dst);
3120 } else {
3121 err = -ENOENT;
3122 }
3123
3124 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3125
3126 if (!adding)
Simon Horman0985df72015-05-25 14:28:35 +09003127 rocker_port_kfree(trans, entry);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003128
3129 if (err)
3130 return err;
3131
3132 /* Resolved means neigh ip_addr is resolved to neigh mac. */
3133
3134 if (!resolved)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003135 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003136
3137 return err;
3138}
3139
Scott Feldman6c707942014-11-28 14:34:28 +01003140static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003141 enum switchdev_trans trans,
Scott Feldman6c707942014-11-28 14:34:28 +01003142 int flags, __be16 vlan_id)
3143{
3144 struct rocker_port *p;
Simon Hormane5054642015-05-25 14:28:36 +09003145 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman6c707942014-11-28 14:34:28 +01003146 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
Scott Feldman04f49fa2015-03-15 23:04:46 -07003147 u32 *group_ids;
Scott Feldman6c707942014-11-28 14:34:28 +01003148 u8 group_count = 0;
Scott Feldman04f49fa2015-03-15 23:04:46 -07003149 int err = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01003150 int i;
3151
Scott Feldman179f9a22015-06-12 21:35:46 -07003152 group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
3153 rocker->port_count, sizeof(u32));
Scott Feldman04f49fa2015-03-15 23:04:46 -07003154 if (!group_ids)
3155 return -ENOMEM;
3156
Scott Feldman6c707942014-11-28 14:34:28 +01003157 /* Adjust the flood group for this VLAN. The flood group
3158 * references an L2 interface group for each port in this
3159 * VLAN.
3160 */
3161
3162 for (i = 0; i < rocker->port_count; i++) {
3163 p = rocker->ports[i];
Scott Feldmanbcfd7802015-06-01 11:39:04 -07003164 if (!p)
3165 continue;
Scott Feldman6c707942014-11-28 14:34:28 +01003166 if (!rocker_port_is_bridged(p))
3167 continue;
3168 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3169 group_ids[group_count++] =
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003170 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003171 }
3172 }
3173
3174 /* If there are no bridged ports in this VLAN, we're done */
3175 if (group_count == 0)
Scott Feldman04f49fa2015-03-15 23:04:46 -07003176 goto no_ports_in_vlan;
Scott Feldman6c707942014-11-28 14:34:28 +01003177
Scott Feldmanc4f20322015-05-10 09:47:50 -07003178 err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
3179 group_count, group_ids, group_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003180 if (err)
3181 netdev_err(rocker_port->dev,
3182 "Error (%d) port VLAN l2 flood group\n", err);
3183
Scott Feldman04f49fa2015-03-15 23:04:46 -07003184no_ports_in_vlan:
Simon Horman0985df72015-05-25 14:28:35 +09003185 rocker_port_kfree(trans, group_ids);
Scott Feldman6c707942014-11-28 14:34:28 +01003186 return err;
3187}
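/* Aside (illustration, not driver code): the flood group is recomputed
 * from scratch on every VLAN change -- walk all ports, keep the bridged
 * ones whose vlan_bitmap has the VID set, and emit one L2 interface
 * group ID per member.  Sketch with an invented group-ID encoding (the
 * real ROCKER_GROUP_L2_INTERFACE() layout differs):
 */
#if 0	/* standalone sketch, kept out of the build */
#include <stdio.h>
#include <stdbool.h>

struct port {
	unsigned int pport;
	bool bridged;
	bool in_vlan;			/* test_bit(vid, vlan_bitmap) */
};

#define GROUP_L2_IFACE(vid, pport) \
	(((unsigned int)(vid) << 16) | (pport))	/* illustrative layout */

int main(void)
{
	struct port ports[] = {
		{ 1, true,  true  },
		{ 2, true,  false },		/* not in this VLAN */
		{ 3, false, true  },		/* not bridged: skipped */
	};
	unsigned int group_ids[3];
	unsigned int n = 0, vid = 100, i;

	for (i = 0; i < 3; i++)
		if (ports[i].bridged && ports[i].in_vlan)
			group_ids[n++] = GROUP_L2_IFACE(vid, ports[i].pport);

	printf("%u member(s), first 0x%08x\n", n, n ? group_ids[0] : 0);
	return 0;
}
#endif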
3188
3189static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003190 enum switchdev_trans trans, int flags,
3191 __be16 vlan_id, bool pop_vlan)
Scott Feldman6c707942014-11-28 14:34:28 +01003192{
Simon Hormane5054642015-05-25 14:28:36 +09003193 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman6c707942014-11-28 14:34:28 +01003194 struct rocker_port *p;
3195 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003196 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003197 int ref = 0;
3198 int err;
3199 int i;
3200
3201 /* An L2 interface group for this port in this VLAN, but
3202 * only when port STP state is LEARNING|FORWARDING.
3203 */
3204
3205 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3206 rocker_port->stp_state == BR_STATE_FORWARDING) {
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003207 out_pport = rocker_port->pport;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003208 err = rocker_group_l2_interface(rocker_port, trans, flags,
3209 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003210 if (err) {
3211 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003212 "Error (%d) port VLAN l2 group for pport %d\n",
3213 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003214 return err;
3215 }
3216 }
3217
3218 /* An L2 interface group for this VLAN to CPU port.
3219 * Add when first port joins this VLAN and destroy when
3220 * last port leaves this VLAN.
3221 */
3222
3223 for (i = 0; i < rocker->port_count; i++) {
3224 p = rocker->ports[i];
Scott Feldmanbcfd7802015-06-01 11:39:04 -07003225 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
Scott Feldman6c707942014-11-28 14:34:28 +01003226 ref++;
3227 }
3228
3229 if ((!adding || ref != 1) && (adding || ref != 0))
3230 return 0;
3231
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003232 out_pport = 0;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003233 err = rocker_group_l2_interface(rocker_port, trans, flags,
3234 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003235 if (err) {
3236 netdev_err(rocker_port->dev,
3237 "Error (%d) port VLAN l2 group for CPU port\n", err);
3238 return err;
3239 }
3240
3241 return 0;
3242}
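/* Aside (illustration, not driver code): the early-return guard above is
 * the De Morgan form of "act only on the first join or the last leave":
 * (adding && ref == 1) || (!adding && ref == 0).  A sketch printing both
 * forms to show they agree:
 */
#if 0	/* standalone sketch, kept out of the build */
#include <stdio.h>

int main(void)
{
	int adding, ref;

	for (adding = 0; adding <= 1; adding++)
		for (ref = 0; ref <= 2; ref++) {
			int skip = (!adding || ref != 1) &&
				   (adding || ref != 0);
			int act = (adding && ref == 1) ||
				  (!adding && ref == 0);

			printf("adding=%d ref=%d skip=%d act=%d\n",
			       adding, ref, skip, act);
		}
	return 0;
}
#endif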
3243
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003244static struct rocker_ctrl {
3245 const u8 *eth_dst;
3246 const u8 *eth_dst_mask;
Jiri Pirko11e6c652014-12-03 14:14:54 +01003247 __be16 eth_type;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003248 bool acl;
3249 bool bridge;
3250 bool term;
3251 bool copy_to_cpu;
3252} rocker_ctrls[] = {
3253 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3254 /* pass link local multicast pkts up to CPU for filtering */
3255 .eth_dst = ll_mac,
3256 .eth_dst_mask = ll_mask,
3257 .acl = true,
3258 },
3259 [ROCKER_CTRL_LOCAL_ARP] = {
3260 /* pass local ARP pkts up to CPU */
3261 .eth_dst = zero_mac,
3262 .eth_dst_mask = zero_mac,
3263 .eth_type = htons(ETH_P_ARP),
3264 .acl = true,
3265 },
3266 [ROCKER_CTRL_IPV4_MCAST] = {
3267 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3268 .eth_dst = ipv4_mcast,
3269 .eth_dst_mask = ipv4_mask,
3270 .eth_type = htons(ETH_P_IP),
3271 .term = true,
3272 .copy_to_cpu = true,
3273 },
3274 [ROCKER_CTRL_IPV6_MCAST] = {
3275 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3276 .eth_dst = ipv6_mcast,
3277 .eth_dst_mask = ipv6_mask,
3278 .eth_type = htons(ETH_P_IPV6),
3279 .term = true,
3280 .copy_to_cpu = true,
3281 },
3282 [ROCKER_CTRL_DFLT_BRIDGING] = {
3283 /* flood any pkts on vlan */
3284 .bridge = true,
3285 .copy_to_cpu = true,
3286 },
Simon Horman82549732015-07-16 10:39:14 +09003287 [ROCKER_CTRL_DFLT_OVS] = {
3288 /* pass all pkts up to CPU */
3289 .eth_dst = zero_mac,
3290 .eth_dst_mask = zero_mac,
3291 .acl = true,
3292 },
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003293};
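/* Aside (illustration, not driver code): each ctrl entry matches frames
 * by destination MAC under a mask, (dst & mask) == (eth_dst & mask), so
 * one ACL entry can cover a whole address range such as the link-local
 * multicast block.  Sketch with example values only; the driver's
 * ll_mac/ll_mask and friends are defined elsewhere in this file:
 */
#if 0	/* standalone sketch, kept out of the build */
#include <stdio.h>
#include <stdbool.h>

static bool mac_match(const unsigned char *dst, const unsigned char *tmpl,
		      const unsigned char *mask)
{
	int i;

	for (i = 0; i < 6; i++)
		if ((dst[i] & mask[i]) != (tmpl[i] & mask[i]))
			return false;
	return true;
}

int main(void)
{
	unsigned char tmpl[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
	unsigned char mask[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
	unsigned char dst[6]  = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e };

	printf("match: %d\n", mac_match(dst, tmpl, mask));	/* 1 */
	return 0;
}
#endif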
3294
3295static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003296 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003297 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003298{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003299 u32 in_pport = rocker_port->pport;
3300 u32 in_pport_mask = 0xffffffff;
3301 u32 out_pport = 0;
Simon Hormane5054642015-05-25 14:28:36 +09003302 const u8 *eth_src = NULL;
3303 const u8 *eth_src_mask = NULL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003304 __be16 vlan_id_mask = htons(0xffff);
3305 u8 ip_proto = 0;
3306 u8 ip_proto_mask = 0;
3307 u8 ip_tos = 0;
3308 u8 ip_tos_mask = 0;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003309 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003310 int err;
3311
Scott Feldmanc4f20322015-05-10 09:47:50 -07003312 err = rocker_flow_tbl_acl(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003313 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003314 eth_src, eth_src_mask,
3315 ctrl->eth_dst, ctrl->eth_dst_mask,
3316 ctrl->eth_type,
3317 vlan_id, vlan_id_mask,
3318 ip_proto, ip_proto_mask,
3319 ip_tos, ip_tos_mask,
3320 group_id);
3321
3322 if (err)
3323 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3324
3325 return err;
3326}
3327
Scott Feldman6c707942014-11-28 14:34:28 +01003328static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003329 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003330 const struct rocker_ctrl *ctrl,
Scott Feldman6c707942014-11-28 14:34:28 +01003331 __be16 vlan_id)
3332{
3333 enum rocker_of_dpa_table_id goto_tbl =
3334 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3335 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3336 u32 tunnel_id = 0;
3337 int err;
3338
3339 if (!rocker_port_is_bridged(rocker_port))
3340 return 0;
3341
Scott Feldmanc4f20322015-05-10 09:47:50 -07003342 err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003343 ctrl->eth_dst, ctrl->eth_dst_mask,
3344 vlan_id, tunnel_id,
3345 goto_tbl, group_id, ctrl->copy_to_cpu);
3346
3347 if (err)
3348 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3349
3350 return err;
3351}
3352
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003353static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003354 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003355 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003356{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003357 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003358 __be16 vlan_id_mask = htons(0xffff);
3359 int err;
3360
3361 if (ntohs(vlan_id) == 0)
3362 vlan_id = rocker_port->internal_vlan_id;
3363
Scott Feldmanc4f20322015-05-10 09:47:50 -07003364 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003365 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003366 ctrl->eth_type, ctrl->eth_dst,
3367 ctrl->eth_dst_mask, vlan_id,
3368 vlan_id_mask, ctrl->copy_to_cpu,
3369 flags);
3370
3371 if (err)
3372 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3373
3374 return err;
3375}
3376
Scott Feldmanc4f20322015-05-10 09:47:50 -07003377static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3378 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003379 const struct rocker_ctrl *ctrl, __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003380{
3381 if (ctrl->acl)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003382 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003383 ctrl, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003384 if (ctrl->bridge)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003385 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003386 ctrl, vlan_id);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003387
3388 if (ctrl->term)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003389 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003390 ctrl, vlan_id);
3391
3392 return -EOPNOTSUPP;
3393}
3394
3395static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003396 enum switchdev_trans trans, int flags,
3397 __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003398{
3399 int err = 0;
3400 int i;
3401
3402 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3403 if (rocker_port->ctrls[i]) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003404 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003405 &rocker_ctrls[i], vlan_id);
3406 if (err)
3407 return err;
3408 }
3409 }
3410
3411 return err;
3412}
3413
Scott Feldmanc4f20322015-05-10 09:47:50 -07003414static int rocker_port_ctrl(struct rocker_port *rocker_port,
3415 enum switchdev_trans trans, int flags,
Simon Hormane5054642015-05-25 14:28:36 +09003416 const struct rocker_ctrl *ctrl)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003417{
3418 u16 vid;
3419 int err = 0;
3420
3421 for (vid = 1; vid < VLAN_N_VID; vid++) {
3422 if (!test_bit(vid, rocker_port->vlan_bitmap))
3423 continue;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003424 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003425 ctrl, htons(vid));
3426 if (err)
3427 break;
3428 }
3429
3430 return err;
3431}
3432
Scott Feldmanc4f20322015-05-10 09:47:50 -07003433static int rocker_port_vlan(struct rocker_port *rocker_port,
3434 enum switchdev_trans trans, int flags, u16 vid)
Scott Feldman6c707942014-11-28 14:34:28 +01003435{
3436 enum rocker_of_dpa_table_id goto_tbl =
3437 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003438 u32 in_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003439 __be16 vlan_id = htons(vid);
3440 __be16 vlan_id_mask = htons(0xffff);
3441 __be16 internal_vlan_id;
3442 bool untagged;
3443 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3444 int err;
3445
3446 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3447
Scott Feldman9228ad22015-05-10 09:47:54 -07003448 if (adding && test_bit(ntohs(internal_vlan_id),
3449 rocker_port->vlan_bitmap))
Scott Feldman6c707942014-11-28 14:34:28 +01003450 return 0; /* already added */
Scott Feldman9228ad22015-05-10 09:47:54 -07003451 else if (!adding && !test_bit(ntohs(internal_vlan_id),
3452 rocker_port->vlan_bitmap))
Scott Feldman6c707942014-11-28 14:34:28 +01003453 return 0; /* already removed */
3454
Scott Feldman9228ad22015-05-10 09:47:54 -07003455 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3456
Scott Feldman6c707942014-11-28 14:34:28 +01003457 if (adding) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003458 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003459 internal_vlan_id);
3460 if (err) {
3461 netdev_err(rocker_port->dev,
3462 "Error (%d) port ctrl vlan add\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003463 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003464 }
3465 }
3466
Scott Feldmanc4f20322015-05-10 09:47:50 -07003467 err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003468 internal_vlan_id, untagged);
3469 if (err) {
3470 netdev_err(rocker_port->dev,
3471 "Error (%d) port VLAN l2 groups\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003472 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003473 }
3474
Scott Feldmanc4f20322015-05-10 09:47:50 -07003475 err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003476 internal_vlan_id);
3477 if (err) {
3478 netdev_err(rocker_port->dev,
3479 "Error (%d) port VLAN l2 flood group\n", err);
Scott Feldman9228ad22015-05-10 09:47:54 -07003480 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003481 }
3482
Scott Feldmanc4f20322015-05-10 09:47:50 -07003483 err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003484 in_pport, vlan_id, vlan_id_mask,
Scott Feldman6c707942014-11-28 14:34:28 +01003485 goto_tbl, untagged, internal_vlan_id);
3486 if (err)
3487 netdev_err(rocker_port->dev,
3488 "Error (%d) port VLAN table\n", err);
3489
Scott Feldman9228ad22015-05-10 09:47:54 -07003490err_out:
3491 if (trans == SWITCHDEV_TRANS_PREPARE)
3492 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3493
Scott Feldman6c707942014-11-28 14:34:28 +01003494 return err;
3495}
3496
Scott Feldmanc4f20322015-05-10 09:47:50 -07003497static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3498 enum switchdev_trans trans, int flags)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003499{
3500 enum rocker_of_dpa_table_id goto_tbl;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003501 u32 in_pport;
3502 u32 in_pport_mask;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003503 int err;
3504
3505	/* Normal Ethernet frames. Matches pkts from any local physical
3506	 * port. Goto VLAN tbl.
3507	 */
3508
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003509 in_pport = 0;
3510 in_pport_mask = 0xffff0000;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003511 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3512
Scott Feldmanc4f20322015-05-10 09:47:50 -07003513 err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003514 in_pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003515 goto_tbl);
3516 if (err)
3517 netdev_err(rocker_port->dev,
3518 "Error (%d) ingress port table entry\n", err);
3519
3520 return err;
3521}
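/* Aside (illustration, not driver code): in_pport 0 under mask
 * 0xffff0000 matches every pport whose upper 16 bits are clear, so a
 * single ingress-port rule covers all local physical ports.  Sketch:
 */
#if 0	/* standalone sketch, kept out of the build */
#include <stdio.h>

static int pport_match(unsigned int pport, unsigned int val,
		       unsigned int mask)
{
	return (pport & mask) == (val & mask);
}

int main(void)
{
	printf("%d %d %d\n",
	       pport_match(1, 0, 0xffff0000),		/* 1 */
	       pport_match(62, 0, 0xffff0000),		/* 1 */
	       pport_match(0x10001, 0, 0xffff0000));	/* 0 */
	return 0;
}
#endif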
3522
Scott Feldman6c707942014-11-28 14:34:28 +01003523struct rocker_fdb_learn_work {
3524 struct work_struct work;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003525 struct rocker_port *rocker_port;
3526 enum switchdev_trans trans;
Scott Feldman6c707942014-11-28 14:34:28 +01003527 int flags;
3528 u8 addr[ETH_ALEN];
3529 u16 vid;
3530};
3531
3532static void rocker_port_fdb_learn_work(struct work_struct *work)
3533{
Simon Hormane5054642015-05-25 14:28:36 +09003534 const struct rocker_fdb_learn_work *lw =
Scott Feldman6c707942014-11-28 14:34:28 +01003535 container_of(work, struct rocker_fdb_learn_work, work);
3536 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3537 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003538 struct switchdev_notifier_fdb_info info;
Jiri Pirko3aeb6612015-01-15 23:49:37 +01003539
3540 info.addr = lw->addr;
3541 info.vid = lw->vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003542
Thomas Graf51ace882014-11-28 14:34:32 +01003543 if (learned && removing)
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003544 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003545 lw->rocker_port->dev, &info.info);
Thomas Graf51ace882014-11-28 14:34:32 +01003546 else if (learned && !removing)
Jiri Pirkoebb9a032015-05-10 09:47:46 -07003547 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003548 lw->rocker_port->dev, &info.info);
Scott Feldman6c707942014-11-28 14:34:28 +01003549
Simon Horman0985df72015-05-25 14:28:35 +09003550 rocker_port_kfree(lw->trans, work);
Scott Feldman6c707942014-11-28 14:34:28 +01003551}
3552
3553static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003554 enum switchdev_trans trans, int flags,
3555 const u8 *addr, __be16 vlan_id)
Scott Feldman6c707942014-11-28 14:34:28 +01003556{
3557 struct rocker_fdb_learn_work *lw;
3558 enum rocker_of_dpa_table_id goto_tbl =
3559 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003560 u32 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003561 u32 tunnel_id = 0;
3562 u32 group_id = ROCKER_GROUP_NONE;
Scott Feldman5111f802014-11-28 14:34:30 +01003563 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
Scott Feldman6c707942014-11-28 14:34:28 +01003564 bool copy_to_cpu = false;
3565 int err;
3566
3567 if (rocker_port_is_bridged(rocker_port))
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003568 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003569
3570 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003571 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
3572 NULL, vlan_id, tunnel_id, goto_tbl,
Scott Feldman6c707942014-11-28 14:34:28 +01003573 group_id, copy_to_cpu);
3574 if (err)
3575 return err;
3576 }
3577
Scott Feldman5111f802014-11-28 14:34:30 +01003578 if (!syncing)
3579 return 0;
3580
Scott Feldman6c707942014-11-28 14:34:28 +01003581 if (!rocker_port_is_bridged(rocker_port))
3582 return 0;
3583
Scott Feldman179f9a22015-06-12 21:35:46 -07003584 lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
Scott Feldman6c707942014-11-28 14:34:28 +01003585 if (!lw)
3586 return -ENOMEM;
3587
3588 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3589
Scott Feldmanc4f20322015-05-10 09:47:50 -07003590 lw->rocker_port = rocker_port;
3591 lw->trans = trans;
Scott Feldman6c707942014-11-28 14:34:28 +01003592 lw->flags = flags;
3593 ether_addr_copy(lw->addr, addr);
3594 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3595
Scott Feldmanc4f20322015-05-10 09:47:50 -07003596 if (trans == SWITCHDEV_TRANS_PREPARE)
Simon Horman0985df72015-05-25 14:28:35 +09003597 rocker_port_kfree(trans, lw);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003598 else
3599 schedule_work(&lw->work);
Scott Feldman6c707942014-11-28 14:34:28 +01003600
3601 return 0;
3602}
3603
3604static struct rocker_fdb_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09003605rocker_fdb_tbl_find(const struct rocker *rocker,
3606 const struct rocker_fdb_tbl_entry *match)
Scott Feldman6c707942014-11-28 14:34:28 +01003607{
3608 struct rocker_fdb_tbl_entry *found;
3609
3610 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3611 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3612 return found;
3613
3614 return NULL;
3615}
3616
3617static int rocker_port_fdb(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003618 enum switchdev_trans trans,
Scott Feldman6c707942014-11-28 14:34:28 +01003619 const unsigned char *addr,
3620 __be16 vlan_id, int flags)
3621{
3622 struct rocker *rocker = rocker_port->rocker;
3623 struct rocker_fdb_tbl_entry *fdb;
3624 struct rocker_fdb_tbl_entry *found;
3625 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3626 unsigned long lock_flags;
3627
Scott Feldman179f9a22015-06-12 21:35:46 -07003628 fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
Scott Feldman6c707942014-11-28 14:34:28 +01003629 if (!fdb)
3630 return -ENOMEM;
3631
3632 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
Scott Feldmana471be42015-09-23 08:39:14 -07003633 fdb->touched = jiffies;
Scott Feldman4c660492015-09-23 08:39:15 -07003634 fdb->key.rocker_port = rocker_port;
Scott Feldman6c707942014-11-28 14:34:28 +01003635 ether_addr_copy(fdb->key.addr, addr);
3636 fdb->key.vlan_id = vlan_id;
3637 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3638
3639 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3640
3641 found = rocker_fdb_tbl_find(rocker, fdb);
3642
Scott Feldmana471be42015-09-23 08:39:14 -07003643 if (found) {
3644 found->touched = jiffies;
3645 if (removing) {
3646 rocker_port_kfree(trans, fdb);
3647 if (trans != SWITCHDEV_TRANS_PREPARE)
3648 hash_del(&found->entry);
3649 }
3650 } else if (!removing) {
Simon Horman42e94882015-05-21 12:40:15 +09003651 if (trans != SWITCHDEV_TRANS_PREPARE)
Scott Feldmana471be42015-09-23 08:39:14 -07003652 hash_add(rocker->fdb_tbl, &fdb->entry,
3653 fdb->key_crc32);
Scott Feldman6c707942014-11-28 14:34:28 +01003654 }
3655
3656 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3657
3658 /* Check if adding and already exists, or removing and can't find */
3659 if (!found != !removing) {
Simon Horman0985df72015-05-25 14:28:35 +09003660 rocker_port_kfree(trans, fdb);
Scott Feldman6c707942014-11-28 14:34:28 +01003661 if (!found && removing)
3662 return 0;
3663 /* Refreshing existing to update aging timers */
3664 flags |= ROCKER_OP_FLAG_REFRESH;
3665 }
3666
Scott Feldmanc4f20322015-05-10 09:47:50 -07003667 return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
Scott Feldman6c707942014-11-28 14:34:28 +01003668}
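/* Aside (illustration, not driver code): "!found != !removing" above is
 * a boolean XOR -- true exactly when adding an entry that already exists
 * or removing one that does not.  The first case becomes a REFRESH of
 * the aging timer, the second an early return.  Truth-table sketch:
 */
#if 0	/* standalone sketch, kept out of the build */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		bool found = i & 1;
		bool removing = i & 2;

		printf("found=%d removing=%d mismatch=%d\n",
		       found, removing, !found != !removing);
	}
	return 0;
}
#endif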
3669
Scott Feldmanc4f20322015-05-10 09:47:50 -07003670static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07003671 enum switchdev_trans trans, int flags)
Scott Feldman6c707942014-11-28 14:34:28 +01003672{
3673 struct rocker *rocker = rocker_port->rocker;
3674 struct rocker_fdb_tbl_entry *found;
3675 unsigned long lock_flags;
Scott Feldman6c707942014-11-28 14:34:28 +01003676 struct hlist_node *tmp;
3677 int bkt;
3678 int err = 0;
3679
3680 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3681 rocker_port->stp_state == BR_STATE_FORWARDING)
3682 return 0;
3683
Scott Feldman179f9a22015-06-12 21:35:46 -07003684 flags |= ROCKER_OP_FLAG_REMOVE;
3685
Scott Feldman6c707942014-11-28 14:34:28 +01003686 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3687
3688 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4c660492015-09-23 08:39:15 -07003689 if (found->key.rocker_port != rocker_port)
Scott Feldman6c707942014-11-28 14:34:28 +01003690 continue;
3691 if (!found->learned)
3692 continue;
Scott Feldmanc4f20322015-05-10 09:47:50 -07003693 err = rocker_port_fdb_learn(rocker_port, trans, flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003694 found->key.addr,
3695 found->key.vlan_id);
3696 if (err)
3697 goto err_out;
Simon Horman3098ac32015-05-21 12:40:14 +09003698 if (trans != SWITCHDEV_TRANS_PREPARE)
3699 hash_del(&found->entry);
Scott Feldman6c707942014-11-28 14:34:28 +01003700 }
3701
3702err_out:
3703 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3704
3705 return err;
3706}
3707
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003708static int rocker_port_router_mac(struct rocker_port *rocker_port,
Scott Feldmanc4f20322015-05-10 09:47:50 -07003709 enum switchdev_trans trans, int flags,
3710 __be16 vlan_id)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003711{
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003712 u32 in_pport_mask = 0xffffffff;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003713 __be16 eth_type;
3714 const u8 *dst_mac_mask = ff_mac;
3715 __be16 vlan_id_mask = htons(0xffff);
3716 bool copy_to_cpu = false;
3717 int err;
3718
3719 if (ntohs(vlan_id) == 0)
3720 vlan_id = rocker_port->internal_vlan_id;
3721
3722 eth_type = htons(ETH_P_IP);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003723 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003724 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003725 eth_type, rocker_port->dev->dev_addr,
3726 dst_mac_mask, vlan_id, vlan_id_mask,
3727 copy_to_cpu, flags);
3728 if (err)
3729 return err;
3730
3731 eth_type = htons(ETH_P_IPV6);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003732 err = rocker_flow_tbl_term_mac(rocker_port, trans,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003733 rocker_port->pport, in_pport_mask,
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003734 eth_type, rocker_port->dev->dev_addr,
3735 dst_mac_mask, vlan_id, vlan_id_mask,
3736 copy_to_cpu, flags);
3737
3738 return err;
3739}
3740
Scott Feldmanc4f20322015-05-10 09:47:50 -07003741static int rocker_port_fwding(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07003742 enum switchdev_trans trans, int flags)
Scott Feldman6c707942014-11-28 14:34:28 +01003743{
3744 bool pop_vlan;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003745 u32 out_pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003746 __be16 vlan_id;
3747 u16 vid;
Scott Feldman6c707942014-11-28 14:34:28 +01003748 int err;
3749
3750 /* Port will be forwarding-enabled if its STP state is LEARNING
3751 * or FORWARDING. Traffic from CPU can still egress, regardless of
3752 * port STP state. Use L2 interface group on port VLANs as a way
3753 * to toggle port forwarding: if forwarding is disabled, L2
3754 * interface group will not exist.
3755 */
3756
3757 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3758 rocker_port->stp_state != BR_STATE_FORWARDING)
3759 flags |= ROCKER_OP_FLAG_REMOVE;
3760
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003761 out_pport = rocker_port->pport;
Scott Feldman6c707942014-11-28 14:34:28 +01003762 for (vid = 1; vid < VLAN_N_VID; vid++) {
3763 if (!test_bit(vid, rocker_port->vlan_bitmap))
3764 continue;
3765 vlan_id = htons(vid);
3766 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003767 err = rocker_group_l2_interface(rocker_port, trans, flags,
3768 vlan_id, out_pport, pop_vlan);
Scott Feldman6c707942014-11-28 14:34:28 +01003769 if (err) {
3770 netdev_err(rocker_port->dev,
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08003771 "Error (%d) port VLAN l2 group for pport %d\n",
3772 err, out_pport);
Scott Feldman6c707942014-11-28 14:34:28 +01003773 return err;
3774 }
3775 }
3776
3777 return 0;
3778}
3779
Scott Feldmanc4f20322015-05-10 09:47:50 -07003780static int rocker_port_stp_update(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07003781 enum switchdev_trans trans, int flags,
3782 u8 state)
Scott Feldman6c707942014-11-28 14:34:28 +01003783{
3784 bool want[ROCKER_CTRL_MAX] = { 0, };
Scott Feldmanc4f20322015-05-10 09:47:50 -07003785 bool prev_ctrls[ROCKER_CTRL_MAX];
3786 u8 prev_state;
Scott Feldman6c707942014-11-28 14:34:28 +01003787 int err;
3788 int i;
3789
Scott Feldmanc4f20322015-05-10 09:47:50 -07003790 if (trans == SWITCHDEV_TRANS_PREPARE) {
3791 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3792 prev_state = rocker_port->stp_state;
3793 }
3794
Scott Feldman6c707942014-11-28 14:34:28 +01003795 if (rocker_port->stp_state == state)
3796 return 0;
3797
3798 rocker_port->stp_state = state;
3799
3800 switch (state) {
3801 case BR_STATE_DISABLED:
3802 /* port is completely disabled */
3803 break;
3804 case BR_STATE_LISTENING:
3805 case BR_STATE_BLOCKING:
3806 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3807 break;
3808 case BR_STATE_LEARNING:
3809 case BR_STATE_FORWARDING:
Simon Horman82549732015-07-16 10:39:14 +09003810 if (!rocker_port_is_ovsed(rocker_port))
3811 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
Scott Feldman6c707942014-11-28 14:34:28 +01003812 want[ROCKER_CTRL_IPV4_MCAST] = true;
3813 want[ROCKER_CTRL_IPV6_MCAST] = true;
3814 if (rocker_port_is_bridged(rocker_port))
3815 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
Simon Horman82549732015-07-16 10:39:14 +09003816 else if (rocker_port_is_ovsed(rocker_port))
3817 want[ROCKER_CTRL_DFLT_OVS] = true;
Scott Feldman6c707942014-11-28 14:34:28 +01003818 else
3819 want[ROCKER_CTRL_LOCAL_ARP] = true;
3820 break;
3821 }
3822
3823 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3824 if (want[i] != rocker_port->ctrls[i]) {
Scott Feldman179f9a22015-06-12 21:35:46 -07003825 int ctrl_flags = flags |
3826 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3827 err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
Scott Feldman6c707942014-11-28 14:34:28 +01003828 &rocker_ctrls[i]);
3829 if (err)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003830 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003831 rocker_port->ctrls[i] = want[i];
3832 }
3833 }
3834
Scott Feldman179f9a22015-06-12 21:35:46 -07003835 err = rocker_port_fdb_flush(rocker_port, trans, flags);
Scott Feldman6c707942014-11-28 14:34:28 +01003836 if (err)
Scott Feldmanc4f20322015-05-10 09:47:50 -07003837 goto err_out;
Scott Feldman6c707942014-11-28 14:34:28 +01003838
Scott Feldman179f9a22015-06-12 21:35:46 -07003839 err = rocker_port_fwding(rocker_port, trans, flags);
Scott Feldmanc4f20322015-05-10 09:47:50 -07003840
3841err_out:
3842 if (trans == SWITCHDEV_TRANS_PREPARE) {
3843 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3844 rocker_port->stp_state = prev_state;
3845 }
3846
3847 return err;
Scott Feldman6c707942014-11-28 14:34:28 +01003848}
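/* Aside (illustration, not driver code): rocker_port_stp_update() above
 * mutates port state eagerly and, in the PREPARE phase, snapshots it up
 * front and restores it at the end, so a dry run leaves no trace.  The
 * checkpoint/rollback pattern in miniature, with invented fields:
 */
#if 0	/* standalone sketch, kept out of the build */
#include <stdio.h>
#include <stdbool.h>

struct state { int stp; bool ctrls[4]; };

static int apply(struct state *s, int new_stp, bool prepare)
{
	struct state saved;
	int err = 0;

	if (prepare)
		saved = *s;		/* checkpoint */

	s->stp = new_stp;		/* mutate eagerly */
	/* ... program hardware here; set err on failure ... */

	if (prepare)
		*s = saved;		/* always roll back a dry run */
	return err;
}

int main(void)
{
	struct state s = { .stp = 0 };

	apply(&s, 3, true);
	printf("after prepare: stp=%d\n", s.stp);	/* still 0 */
	apply(&s, 3, false);
	printf("after commit: stp=%d\n", s.stp);	/* 3 */
	return 0;
}
#endif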
3849
Scott Feldmanc4f20322015-05-10 09:47:50 -07003850static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07003851 enum switchdev_trans trans, int flags)
Scott Feldmane47172a2015-02-25 20:15:38 -08003852{
3853 if (rocker_port_is_bridged(rocker_port))
3854 /* bridge STP will enable port */
3855 return 0;
3856
3857 /* port is not bridged, so simulate going to FORWARDING state */
Scott Feldman179f9a22015-06-12 21:35:46 -07003858 return rocker_port_stp_update(rocker_port, trans, flags,
3859 BR_STATE_FORWARDING);
Scott Feldmane47172a2015-02-25 20:15:38 -08003860}
3861
Scott Feldmanc4f20322015-05-10 09:47:50 -07003862static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
Scott Feldman179f9a22015-06-12 21:35:46 -07003863 enum switchdev_trans trans, int flags)
Scott Feldmane47172a2015-02-25 20:15:38 -08003864{
3865 if (rocker_port_is_bridged(rocker_port))
3866 /* bridge STP will disable port */
3867 return 0;
3868
3869 /* port is not bridged, so simulate going to DISABLED state */
Scott Feldman179f9a22015-06-12 21:35:46 -07003870 return rocker_port_stp_update(rocker_port, trans, flags,
3871 BR_STATE_DISABLED);
Scott Feldmane47172a2015-02-25 20:15:38 -08003872}
3873
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003874static struct rocker_internal_vlan_tbl_entry *
Simon Hormane5054642015-05-25 14:28:36 +09003875rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003876{
3877 struct rocker_internal_vlan_tbl_entry *found;
3878
3879 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3880 entry, ifindex) {
3881 if (found->ifindex == ifindex)
3882 return found;
3883 }
3884
3885 return NULL;
3886}
3887
3888static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3889 int ifindex)
3890{
3891 struct rocker *rocker = rocker_port->rocker;
3892 struct rocker_internal_vlan_tbl_entry *entry;
3893 struct rocker_internal_vlan_tbl_entry *found;
3894 unsigned long lock_flags;
3895 int i;
3896
Simon Hormandf6a2062015-05-21 12:40:17 +09003897 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003898 if (!entry)
3899 return 0;
3900
3901 entry->ifindex = ifindex;
3902
3903 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3904
3905 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3906 if (found) {
Simon Hormandf6a2062015-05-21 12:40:17 +09003907 kfree(entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003908 goto found;
3909 }
3910
3911 found = entry;
3912 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3913
3914 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3915 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3916 continue;
3917 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3918 goto found;
3919 }
3920
3921 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3922
3923found:
3924 found->ref_count++;
3925 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3926
3927 return found->vlan_id;
3928}
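/* Aside (illustration, not driver code): internal VLAN IDs come from a
 * first-fit bitmap scan offset by a base ID, with one refcounted table
 * entry per ifindex.  A first-fit sketch; the base value below is a
 * stand-in, not ROCKER_INTERNAL_VLAN_ID_BASE:
 */
#if 0	/* standalone sketch, kept out of the build */
#include <stdio.h>

#define N_INTERNAL_VLANS 255
#define VLAN_ID_BASE 0xf00		/* stand-in base */

static unsigned long bitmap[(N_INTERNAL_VLANS + 63) / 64];

static int internal_vlan_get(void)
{
	int i;

	for (i = 0; i < N_INTERNAL_VLANS; i++)
		if (!(bitmap[i / 64] & (1UL << (i % 64)))) {
			bitmap[i / 64] |= 1UL << (i % 64);
			return VLAN_ID_BASE + i;
		}
	return -1;			/* out of internal VLAN IDs */
}

static void internal_vlan_put(int vlan_id)
{
	int bit = vlan_id - VLAN_ID_BASE;

	bitmap[bit / 64] &= ~(1UL << (bit % 64));
}

int main(void)
{
	int a = internal_vlan_get();
	int b = internal_vlan_get();

	printf("0x%x 0x%x\n", a, b);		/* 0xf00 0xf01 */
	internal_vlan_put(a);
	printf("0x%x\n", internal_vlan_get());	/* 0xf00 again */
	return 0;
}
#endif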
3929
Simon Hormane5054642015-05-25 14:28:36 +09003930static void
3931rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
3932 int ifindex)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003933{
3934 struct rocker *rocker = rocker_port->rocker;
3935 struct rocker_internal_vlan_tbl_entry *found;
3936 unsigned long lock_flags;
3937 unsigned long bit;
3938
3939 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3940
3941 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3942 if (!found) {
3943 netdev_err(rocker_port->dev,
3944 "ifindex (%d) not found in internal VLAN tbl\n",
3945 ifindex);
3946 goto not_found;
3947 }
3948
3949 if (--found->ref_count <= 0) {
3950 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3951 clear_bit(bit, rocker->internal_vlan_bitmap);
3952 hash_del(&found->entry);
Simon Hormandf6a2062015-05-21 12:40:17 +09003953 kfree(found);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01003954 }
3955
3956not_found:
3957 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3958}
3959
Scott Feldmanc4f20322015-05-10 09:47:50 -07003960static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
3961 enum switchdev_trans trans, __be32 dst,
Simon Hormane5054642015-05-25 14:28:36 +09003962 int dst_len, const struct fib_info *fi,
3963 u32 tb_id, int flags)
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003964{
Simon Hormane5054642015-05-25 14:28:36 +09003965 const struct fib_nh *nh;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003966 __be16 eth_type = htons(ETH_P_IP);
3967 __be32 dst_mask = inet_make_mask(dst_len);
3968 __be16 internal_vlan_id = rocker_port->internal_vlan_id;
3969 u32 priority = fi->fib_priority;
3970 enum rocker_of_dpa_table_id goto_tbl =
3971 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3972 u32 group_id;
3973 bool nh_on_port;
3974 bool has_gw;
3975 u32 index;
3976 int err;
3977
3978 /* XXX support ECMP */
3979
3980 nh = fi->fib_nh;
3981 nh_on_port = (fi->fib_dev == rocker_port->dev);
3982 has_gw = !!nh->nh_gw;
3983
3984 if (has_gw && nh_on_port) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07003985 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003986 nh->nh_gw, &index);
3987 if (err)
3988 return err;
3989
3990 group_id = ROCKER_GROUP_L3_UNICAST(index);
3991 } else {
3992 /* Send to CPU for processing */
3993 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
3994 }
3995
Scott Feldmanc4f20322015-05-10 09:47:50 -07003996 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
Scott Feldmanc1beeef2015-03-05 21:21:20 -08003997 dst_mask, priority, goto_tbl,
3998 group_id, flags);
3999 if (err)
4000 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
4001 err, &dst);
4002
4003 return err;
4004}
4005
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004006/*****************
4007 * Net device ops
4008 *****************/
4009
4010static int rocker_port_open(struct net_device *dev)
4011{
4012 struct rocker_port *rocker_port = netdev_priv(dev);
4013 int err;
4014
4015 err = rocker_port_dma_rings_init(rocker_port);
4016 if (err)
4017 return err;
4018
4019 err = request_irq(rocker_msix_tx_vector(rocker_port),
4020 rocker_tx_irq_handler, 0,
4021 rocker_driver_name, rocker_port);
4022 if (err) {
4023 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
4024 goto err_request_tx_irq;
4025 }
4026
4027 err = request_irq(rocker_msix_rx_vector(rocker_port),
4028 rocker_rx_irq_handler, 0,
4029 rocker_driver_name, rocker_port);
4030 if (err) {
4031 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
4032 goto err_request_rx_irq;
4033 }
4034
Scott Feldman179f9a22015-06-12 21:35:46 -07004035 err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01004036 if (err)
Scott Feldmane47172a2015-02-25 20:15:38 -08004037 goto err_fwd_enable;
Scott Feldman6c707942014-11-28 14:34:28 +01004038
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004039 napi_enable(&rocker_port->napi_tx);
4040 napi_enable(&rocker_port->napi_rx);
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004041 if (!dev->proto_down)
4042 rocker_port_set_enable(rocker_port, true);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004043 netif_start_queue(dev);
4044 return 0;
4045
Scott Feldmane47172a2015-02-25 20:15:38 -08004046err_fwd_enable:
Scott Feldman6c707942014-11-28 14:34:28 +01004047 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004048err_request_rx_irq:
4049 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4050err_request_tx_irq:
4051 rocker_port_dma_rings_fini(rocker_port);
4052 return err;
4053}
4054
4055static int rocker_port_stop(struct net_device *dev)
4056{
4057 struct rocker_port *rocker_port = netdev_priv(dev);
4058
4059 netif_stop_queue(dev);
4060 rocker_port_set_enable(rocker_port, false);
4061 napi_disable(&rocker_port->napi_rx);
4062 napi_disable(&rocker_port->napi_tx);
Scott Feldmanf66feaa2015-06-12 21:35:50 -07004063 rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE,
4064 ROCKER_OP_FLAG_NOWAIT);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004065 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4066 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4067 rocker_port_dma_rings_fini(rocker_port);
4068
4069 return 0;
4070}
4071
Simon Hormane5054642015-05-25 14:28:36 +09004072static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4073 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004074{
Simon Hormane5054642015-05-25 14:28:36 +09004075 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004076 struct pci_dev *pdev = rocker->pdev;
Simon Hormane5054642015-05-25 14:28:36 +09004077 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004078 struct rocker_tlv *attr;
4079 int rem;
4080
4081 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4082 if (!attrs[ROCKER_TLV_TX_FRAGS])
4083 return;
4084 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
Simon Hormane5054642015-05-25 14:28:36 +09004085 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004086 dma_addr_t dma_handle;
4087 size_t len;
4088
4089 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4090 continue;
4091 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
4092 attr);
4093 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4094 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4095 continue;
4096 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4097 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4098 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
4099 }
4100}
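/* Aside (illustration, not driver code): descriptors carry nested
 * type/length/value attributes, and the unmap path above walks the
 * TX_FRAGS nest pulling ADDR/LEN out of each TX_FRAG.  Generic TLV
 * iteration over a flat byte buffer, with an invented header layout
 * (the real rocker TLVs also pad for alignment):
 */
#if 0	/* standalone sketch, kept out of the build */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct tlv { uint16_t type; uint16_t len; /* value bytes follow */ };

static void tlv_walk(const uint8_t *buf, size_t buf_len)
{
	size_t off = 0;

	while (off + sizeof(struct tlv) <= buf_len) {
		struct tlv h;

		memcpy(&h, buf + off, sizeof(h));
		if (off + sizeof(h) + h.len > buf_len)
			break;		/* truncated attribute */
		printf("type=%u len=%u\n", h.type, h.len);
		off += sizeof(h) + h.len;
	}
}

int main(void)
{
	uint8_t buf[64];
	struct tlv h = { .type = 1, .len = 4 };
	uint32_t val = 0xdeadbeef;

	memcpy(buf, &h, sizeof(h));
	memcpy(buf + sizeof(h), &val, sizeof(val));
	tlv_walk(buf, sizeof(h) + sizeof(val));
	return 0;
}
#endif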
4101
Simon Hormane5054642015-05-25 14:28:36 +09004102static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004103 struct rocker_desc_info *desc_info,
4104 char *buf, size_t buf_len)
4105{
Simon Hormane5054642015-05-25 14:28:36 +09004106 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004107 struct pci_dev *pdev = rocker->pdev;
4108 dma_addr_t dma_handle;
4109 struct rocker_tlv *frag;
4110
4111 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
4112 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
4113 if (net_ratelimit())
4114 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
4115 return -EIO;
4116 }
4117 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
4118 if (!frag)
4119 goto unmap_frag;
4120 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
4121 dma_handle))
4122 goto nest_cancel;
4123 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
4124 buf_len))
4125 goto nest_cancel;
4126 rocker_tlv_nest_end(desc_info, frag);
4127 return 0;
4128
4129nest_cancel:
4130 rocker_tlv_nest_cancel(desc_info, frag);
4131unmap_frag:
4132 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
4133 return -EMSGSIZE;
4134}
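
/* rocker_tx_desc_frag_map_put() is the TLV construction pattern used
 * throughout the driver: open a nest, emit attributes, then either
 * close the nest or cancel it so a half-written attribute never
 * reaches the device.  A minimal sketch of the same pattern
 * (illustrative only, not driver code):
 *
 *	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
 *	if (!frag)
 *		return -EMSGSIZE;
 *	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
 *			       dma_handle)) {
 *		rocker_tlv_nest_cancel(desc_info, frag);
 *		return -EMSGSIZE;
 *	}
 *	rocker_tlv_nest_end(desc_info, frag);
 *
 * The DMA mapping is rolled back on failure as well, since a
 * cancelled nest leaves nothing behind for frags_unmap() to find.
 */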
4135
4136static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
4137{
4138 struct rocker_port *rocker_port = netdev_priv(dev);
4139 struct rocker *rocker = rocker_port->rocker;
4140 struct rocker_desc_info *desc_info;
4141 struct rocker_tlv *frags;
4142 int i;
4143 int err;
4144
4145 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4146 if (unlikely(!desc_info)) {
4147 if (net_ratelimit())
4148 netdev_err(dev, "tx ring full when queue awake\n");
4149 return NETDEV_TX_BUSY;
4150 }
4151
4152 rocker_desc_cookie_ptr_set(desc_info, skb);
4153
4154 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
4155 if (!frags)
4156 goto out;
4157 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4158 skb->data, skb_headlen(skb));
4159 if (err)
4160 goto nest_cancel;
Jiri Pirko95b9be62015-08-02 20:56:38 +02004161 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
4162 err = skb_linearize(skb);
4163 if (err)
4164 goto unmap_frags;
4165 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004166
4167 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4168 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4169
4170 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4171 skb_frag_address(frag),
4172 skb_frag_size(frag));
4173 if (err)
4174 goto unmap_frags;
4175 }
4176 rocker_tlv_nest_end(desc_info, frags);
4177
4178 rocker_desc_gen_clear(desc_info);
4179 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
4180
4181 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4182 if (!desc_info)
4183 netif_stop_queue(dev);
4184
4185 return NETDEV_TX_OK;
4186
4187unmap_frags:
4188 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4189nest_cancel:
4190 rocker_tlv_nest_cancel(desc_info, frags);
4191out:
4192 dev_kfree_skb(skb);
David Ahernf2bbca52015-01-16 14:22:29 -07004193 dev->stats.tx_dropped++;
4194
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004195 return NETDEV_TX_OK;
4196}
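
/* Flow control on the tx ring is cooperative: rocker_port_xmit()
 * stops the queue as soon as no further head descriptor is
 * available, and rocker_port_poll_tx() below wakes it once
 * completions have returned credits.  NETDEV_TX_BUSY is therefore
 * the exceptional case, hit only if the stack races ahead of the
 * queue-stop.
 */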
4197
4198static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4199{
4200 struct sockaddr *addr = p;
4201 struct rocker_port *rocker_port = netdev_priv(dev);
4202 int err;
4203
4204 if (!is_valid_ether_addr(addr->sa_data))
4205 return -EADDRNOTAVAIL;
4206
4207 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4208 if (err)
4209 return err;
4210 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4211 return 0;
4212}
4213
Scott Feldman77a58c72015-07-08 16:06:47 -07004214static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4215{
4216 struct rocker_port *rocker_port = netdev_priv(dev);
4217 int running = netif_running(dev);
4218 int err;
4219
4220#define ROCKER_PORT_MIN_MTU 68
4221#define ROCKER_PORT_MAX_MTU 9000
4222
4223 if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4224 return -EINVAL;
4225
4226 if (running)
4227 rocker_port_stop(dev);
4228
4229 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4230 dev->mtu = new_mtu;
4231
4232 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4233 if (err)
4234 return err;
4235
4236 if (running)
4237 err = rocker_port_open(dev);
4238
4239 return err;
4240}
4241
David Aherndb191702015-03-17 20:23:16 -06004242static int rocker_port_get_phys_port_name(struct net_device *dev,
4243 char *buf, size_t len)
4244{
4245 struct rocker_port *rocker_port = netdev_priv(dev);
4246 struct port_name name = { .buf = buf, .len = len };
4247 int err;
4248
Scott Feldman179f9a22015-06-12 21:35:46 -07004249 err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
David Aherndb191702015-03-17 20:23:16 -06004250 rocker_cmd_get_port_settings_prep, NULL,
4251 rocker_cmd_get_port_settings_phys_name_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07004252 &name);
David Aherndb191702015-03-17 20:23:16 -06004253
4254 return err ? -EOPNOTSUPP : 0;
4255}
4256
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004257static int rocker_port_change_proto_down(struct net_device *dev,
4258 bool proto_down)
4259{
4260 struct rocker_port *rocker_port = netdev_priv(dev);
4261
4262 if (rocker_port->dev->flags & IFF_UP)
4263 rocker_port_set_enable(rocker_port, !proto_down);
4264 rocker_port->dev->proto_down = proto_down;
4265 return 0;
4266}
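
/* proto_down lets a user-space protocol daemon administratively
 * disable the port while the netdev itself stays IFF_UP: only the
 * port enable bit is toggled here, and rocker_port_open() re-checks
 * dev->proto_down before enabling the port on the next open.
 */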
4267
Scott Feldmandd19f832015-08-12 18:45:25 -07004268static void rocker_port_neigh_destroy(struct neighbour *n)
4269{
4270 struct rocker_port *rocker_port = netdev_priv(n->dev);
4271 int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4272 __be32 ip_addr = *(__be32 *)n->primary_key;
4273
4274 rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
4275 flags, ip_addr, n->ha);
4276}
4277
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004278static const struct net_device_ops rocker_port_netdev_ops = {
4279 .ndo_open = rocker_port_open,
4280 .ndo_stop = rocker_port_stop,
4281 .ndo_start_xmit = rocker_port_xmit,
4282 .ndo_set_mac_address = rocker_port_set_mac_address,
Scott Feldman77a58c72015-07-08 16:06:47 -07004283 .ndo_change_mtu = rocker_port_change_mtu,
Scott Feldman85fdb952015-05-10 09:48:05 -07004284 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
Scott Feldmanfc8f40d2015-05-10 09:47:57 -07004285 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
Scott Feldman54ba5a02015-05-10 09:48:01 -07004286 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004287 .ndo_fdb_add = switchdev_port_fdb_add,
4288 .ndo_fdb_del = switchdev_port_fdb_del,
4289 .ndo_fdb_dump = switchdev_port_fdb_dump,
David Aherndb191702015-03-17 20:23:16 -06004290 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004291 .ndo_change_proto_down = rocker_port_change_proto_down,
Scott Feldmandd19f832015-08-12 18:45:25 -07004292 .ndo_neigh_destroy = rocker_port_neigh_destroy,
Scott Feldman98237d42015-03-15 21:07:15 -07004293};
4294
4295/********************
4296 * switchdev interface
4297 ********************/
4298
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004299static int rocker_port_attr_get(struct net_device *dev,
4300 struct switchdev_attr *attr)
Scott Feldman98237d42015-03-15 21:07:15 -07004301{
Simon Hormane5054642015-05-25 14:28:36 +09004302 const struct rocker_port *rocker_port = netdev_priv(dev);
4303 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman98237d42015-03-15 21:07:15 -07004304
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004305 switch (attr->id) {
4306 case SWITCHDEV_ATTR_PORT_PARENT_ID:
Scott Feldman42275bd2015-05-13 11:16:50 -07004307 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4308 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004309 break;
Scott Feldman6004c862015-05-10 09:47:55 -07004310 case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
Scott Feldman42275bd2015-05-13 11:16:50 -07004311 attr->u.brport_flags = rocker_port->brport_flags;
Scott Feldman6004c862015-05-10 09:47:55 -07004312 break;
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004313 default:
4314 return -EOPNOTSUPP;
4315 }
4316
Scott Feldman98237d42015-03-15 21:07:15 -07004317 return 0;
4318}
4319
Simon Hormane5054642015-05-25 14:28:36 +09004320static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
Scott Feldmanc4f20322015-05-10 09:47:50 -07004321{
4322 struct list_head *mem, *tmp;
4323
4324 list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
4325 list_del(mem);
4326 kfree(mem);
4327 }
4328}
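
/* Switchdev attr/obj operations arrive as two-phase transactions.
 * During SWITCHDEV_TRANS_PREPARE the driver must acquire everything
 * the operation needs (allocations made in that phase are queued on
 * rocker_port->trans_mem) without touching hardware, so the later
 * commit, which consumes the preallocated memory, does not fail for
 * lack of resources.  SWITCHDEV_TRANS_ABORT means the prepare was
 * abandoned, so rocker_port_trans_abort() above just frees whatever
 * was queued.  Rough sequence:
 *
 *	attr_set(PREPARE) -> preallocate onto trans_mem, validate
 *	attr_set(COMMIT)  -> consume trans_mem, program hardware
 *	attr_set(ABORT)   -> free trans_mem, hardware untouched
 */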
4329
Scott Feldman6004c862015-05-10 09:47:55 -07004330static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
4331 enum switchdev_trans trans,
4332 unsigned long brport_flags)
4333{
4334 unsigned long orig_flags;
4335 int err = 0;
4336
4337 orig_flags = rocker_port->brport_flags;
4338 rocker_port->brport_flags = brport_flags;
4339 if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
4340 err = rocker_port_set_learning(rocker_port, trans);
4341
4342 if (trans == SWITCHDEV_TRANS_PREPARE)
4343 rocker_port->brport_flags = orig_flags;
4344
4345 return err;
4346}
4347
Scott Feldmanc4f20322015-05-10 09:47:50 -07004348static int rocker_port_attr_set(struct net_device *dev,
4349 struct switchdev_attr *attr)
4350{
4351 struct rocker_port *rocker_port = netdev_priv(dev);
4352 int err = 0;
4353
4354 switch (attr->trans) {
4355 case SWITCHDEV_TRANS_PREPARE:
4356 BUG_ON(!list_empty(&rocker_port->trans_mem));
4357 break;
4358 case SWITCHDEV_TRANS_ABORT:
4359 rocker_port_trans_abort(rocker_port);
4360 return 0;
4361 default:
4362 break;
4363 }
4364
4365 switch (attr->id) {
Scott Feldman35636062015-05-10 09:47:51 -07004366 case SWITCHDEV_ATTR_PORT_STP_STATE:
Scott Feldmanac283932015-06-12 21:35:48 -07004367 err = rocker_port_stp_update(rocker_port, attr->trans,
4368 ROCKER_OP_FLAG_NOWAIT,
Scott Feldman42275bd2015-05-13 11:16:50 -07004369 attr->u.stp_state);
Scott Feldman35636062015-05-10 09:47:51 -07004370 break;
Scott Feldman6004c862015-05-10 09:47:55 -07004371 case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
4372 err = rocker_port_brport_flags_set(rocker_port, attr->trans,
Scott Feldman42275bd2015-05-13 11:16:50 -07004373 attr->u.brport_flags);
Scott Feldman6004c862015-05-10 09:47:55 -07004374 break;
Scott Feldmanc4f20322015-05-10 09:47:50 -07004375 default:
4376 err = -EOPNOTSUPP;
4377 break;
4378 }
4379
4380 return err;
Scott Feldman98237d42015-03-15 21:07:15 -07004381}
4382
Scott Feldman9228ad22015-05-10 09:47:54 -07004383static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4384 enum switchdev_trans trans, u16 vid, u16 flags)
4385{
4386 int err;
4387
4388 /* XXX deal with flags for PVID and untagged */
4389
4390 err = rocker_port_vlan(rocker_port, trans, 0, vid);
4391 if (err)
4392 return err;
4393
Scott Feldmancec04a62015-06-01 11:39:03 -07004394 err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
4395 if (err)
4396 rocker_port_vlan(rocker_port, trans,
4397 ROCKER_OP_FLAG_REMOVE, vid);
4398
4399 return err;
Scott Feldman9228ad22015-05-10 09:47:54 -07004400}
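
/* Adding a VLAN touches two tables: a VLAN-table entry so the VID is
 * accepted on ingress, and a termination-MAC entry so routed frames
 * on that VLAN are passed up for L3 processing.  If the second write
 * fails, the first is removed again to keep the tables consistent.
 */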
4401
4402static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4403 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09004404 const struct switchdev_obj_vlan *vlan)
Scott Feldman9228ad22015-05-10 09:47:54 -07004405{
4406 u16 vid;
4407 int err;
4408
Scott Feldman3e3a78b2015-06-22 00:27:16 -07004409 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
Scott Feldman9228ad22015-05-10 09:47:54 -07004410 err = rocker_port_vlan_add(rocker_port, trans,
4411 vid, vlan->flags);
4412 if (err)
4413 return err;
4414 }
4415
4416 return 0;
4417}
4418
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004419static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4420 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09004421 const struct switchdev_obj_fdb *fdb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004422{
4423 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4424 int flags = 0;
4425
4426 if (!rocker_port_is_bridged(rocker_port))
4427 return -EINVAL;
4428
4429 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4430}
4431
Scott Feldman9228ad22015-05-10 09:47:54 -07004432static int rocker_port_obj_add(struct net_device *dev,
4433 struct switchdev_obj *obj)
4434{
4435 struct rocker_port *rocker_port = netdev_priv(dev);
Simon Hormane5054642015-05-25 14:28:36 +09004436 const struct switchdev_obj_ipv4_fib *fib4;
Scott Feldman9228ad22015-05-10 09:47:54 -07004437 int err = 0;
4438
4439 switch (obj->trans) {
4440 case SWITCHDEV_TRANS_PREPARE:
4441 BUG_ON(!list_empty(&rocker_port->trans_mem));
4442 break;
4443 case SWITCHDEV_TRANS_ABORT:
4444 rocker_port_trans_abort(rocker_port);
4445 return 0;
4446 default:
4447 break;
4448 }
4449
4450 switch (obj->id) {
4451 case SWITCHDEV_OBJ_PORT_VLAN:
4452 err = rocker_port_vlans_add(rocker_port, obj->trans,
Scott Feldman42275bd2015-05-13 11:16:50 -07004453 &obj->u.vlan);
Scott Feldman9228ad22015-05-10 09:47:54 -07004454 break;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004455 case SWITCHDEV_OBJ_IPV4_FIB:
Scott Feldman42275bd2015-05-13 11:16:50 -07004456 fib4 = &obj->u.ipv4_fib;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004457 err = rocker_port_fib_ipv4(rocker_port, obj->trans,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004458 htonl(fib4->dst), fib4->dst_len,
Scott Feldman58c2cb12015-05-10 09:48:06 -07004459 fib4->fi, fib4->tb_id, 0);
4460 break;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004461 case SWITCHDEV_OBJ_PORT_FDB:
4462 err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
4463 break;
Scott Feldman9228ad22015-05-10 09:47:54 -07004464 default:
4465 err = -EOPNOTSUPP;
4466 break;
4467 }
4468
4469 return err;
4470}
4471
4472static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4473 u16 vid, u16 flags)
4474{
4475 int err;
4476
4477 err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
4478 ROCKER_OP_FLAG_REMOVE, htons(vid));
4479 if (err)
4480 return err;
4481
4482 return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
4483 ROCKER_OP_FLAG_REMOVE, vid);
4484}
4485
4486static int rocker_port_vlans_del(struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09004487 const struct switchdev_obj_vlan *vlan)
Scott Feldman9228ad22015-05-10 09:47:54 -07004488{
4489 u16 vid;
4490 int err;
4491
Scott Feldman3e3a78b2015-06-22 00:27:16 -07004492 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
Scott Feldman9228ad22015-05-10 09:47:54 -07004493 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4494 if (err)
4495 return err;
4496 }
4497
4498 return 0;
4499}
4500
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004501static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4502 enum switchdev_trans trans,
Simon Hormane5054642015-05-25 14:28:36 +09004503 const struct switchdev_obj_fdb *fdb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004504{
4505 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
Scott Feldmanb4ad7ba2015-06-14 11:33:11 -07004506 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004507
4508 if (!rocker_port_is_bridged(rocker_port))
4509 return -EINVAL;
4510
4511 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4512}
4513
Scott Feldman9228ad22015-05-10 09:47:54 -07004514static int rocker_port_obj_del(struct net_device *dev,
4515 struct switchdev_obj *obj)
4516{
4517 struct rocker_port *rocker_port = netdev_priv(dev);
Simon Hormane5054642015-05-25 14:28:36 +09004518 const struct switchdev_obj_ipv4_fib *fib4;
Scott Feldman9228ad22015-05-10 09:47:54 -07004519 int err = 0;
4520
4521 switch (obj->id) {
4522 case SWITCHDEV_OBJ_PORT_VLAN:
Scott Feldman42275bd2015-05-13 11:16:50 -07004523 err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
Scott Feldman9228ad22015-05-10 09:47:54 -07004524 break;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004525 case SWITCHDEV_OBJ_IPV4_FIB:
Scott Feldman42275bd2015-05-13 11:16:50 -07004526 fib4 = &obj->u.ipv4_fib;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004527 err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004528 htonl(fib4->dst), fib4->dst_len,
4529 fib4->fi, fib4->tb_id,
4530 ROCKER_OP_FLAG_REMOVE);
Scott Feldman58c2cb12015-05-10 09:48:06 -07004531 break;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004532 case SWITCHDEV_OBJ_PORT_FDB:
4533 err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
4534 break;
4535 default:
4536 err = -EOPNOTSUPP;
4537 break;
4538 }
4539
4540 return err;
4541}
4542
Simon Hormane5054642015-05-25 14:28:36 +09004543static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004544 struct switchdev_obj *obj)
4545{
4546 struct rocker *rocker = rocker_port->rocker;
4547 struct switchdev_obj_fdb *fdb = &obj->u.fdb;
4548 struct rocker_fdb_tbl_entry *found;
4549 struct hlist_node *tmp;
4550 unsigned long lock_flags;
4551 int bkt;
4552 int err = 0;
4553
4554 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4555 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4c660492015-09-23 08:39:15 -07004556 if (found->key.rocker_port != rocker_port)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004557 continue;
David S. Millercdf09692015-08-11 12:00:37 -07004558 fdb->addr = found->key.addr;
Vivien Didelotce80e7b2015-08-10 09:09:52 -04004559 fdb->ndm_state = NUD_REACHABLE;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004560 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4561 found->key.vlan_id);
4562 err = obj->cb(rocker_port->dev, obj);
4563 if (err)
4564 break;
4565 }
4566 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4567
4568 return err;
4569}
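
/* Dumps hand each matching entry back through obj->cb instead of
 * filling a flat buffer: the FDB hash is walked under fdb_tbl_lock,
 * obj->u.fdb is rewritten in place for every entry owned by this
 * port, and a non-zero callback return stops the walk early (for
 * instance when the netlink skb is full).
 */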
4570
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004571static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4572 struct switchdev_obj *obj)
4573{
4574 struct switchdev_obj_vlan *vlan = &obj->u.vlan;
4575 u16 vid;
4576 int err = 0;
4577
4578 for (vid = 1; vid < VLAN_N_VID; vid++) {
4579 if (!test_bit(vid, rocker_port->vlan_bitmap))
4580 continue;
4581 vlan->flags = 0;
4582 if (rocker_vlan_id_is_internal(htons(vid)))
4583 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4584 vlan->vid_begin = vlan->vid_end = vid;
4585 err = obj->cb(rocker_port->dev, obj);
4586 if (err)
4587 break;
4588 }
4589
4590 return err;
4591}
4592
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004593static int rocker_port_obj_dump(struct net_device *dev,
4594 struct switchdev_obj *obj)
4595{
Simon Hormane5054642015-05-25 14:28:36 +09004596 const struct rocker_port *rocker_port = netdev_priv(dev);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004597 int err = 0;
4598
4599 switch (obj->id) {
4600 case SWITCHDEV_OBJ_PORT_FDB:
4601 err = rocker_port_fdb_dump(rocker_port, obj);
4602 break;
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004603 case SWITCHDEV_OBJ_PORT_VLAN:
4604 err = rocker_port_vlan_dump(rocker_port, obj);
4605 break;
Scott Feldman9228ad22015-05-10 09:47:54 -07004606 default:
4607 err = -EOPNOTSUPP;
4608 break;
4609 }
4610
4611 return err;
4612}
4613
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07004614static const struct switchdev_ops rocker_port_switchdev_ops = {
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004615 .switchdev_port_attr_get = rocker_port_attr_get,
Scott Feldman35636062015-05-10 09:47:51 -07004616 .switchdev_port_attr_set = rocker_port_attr_set,
Scott Feldman9228ad22015-05-10 09:47:54 -07004617 .switchdev_port_obj_add = rocker_port_obj_add,
4618 .switchdev_port_obj_del = rocker_port_obj_del,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004619 .switchdev_port_obj_dump = rocker_port_obj_dump,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004620};
4621
4622/********************
4623 * ethtool interface
4624 ********************/
4625
4626static int rocker_port_get_settings(struct net_device *dev,
4627 struct ethtool_cmd *ecmd)
4628{
4629 struct rocker_port *rocker_port = netdev_priv(dev);
4630
4631 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4632}
4633
4634static int rocker_port_set_settings(struct net_device *dev,
4635 struct ethtool_cmd *ecmd)
4636{
4637 struct rocker_port *rocker_port = netdev_priv(dev);
4638
4639 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4640}
4641
4642static void rocker_port_get_drvinfo(struct net_device *dev,
4643 struct ethtool_drvinfo *drvinfo)
4644{
4645 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4646 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4647}
4648
David Ahern9766e972015-01-29 20:59:33 -07004649static struct rocker_port_stats {
4650 char str[ETH_GSTRING_LEN];
4651 int type;
4652} rocker_port_stats[] = {
4653 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4654 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4655 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4656 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4657
4658 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4659 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4660 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4661 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
4662};
4663
4664#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
4665
4666static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4667 u8 *data)
4668{
4669 u8 *p = data;
4670 int i;
4671
4672 switch (stringset) {
4673 case ETH_SS_STATS:
4674 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4675 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4676 p += ETH_GSTRING_LEN;
4677 }
4678 break;
4679 }
4680}
4681
4682static int
Simon Horman534ba6a2015-06-01 13:25:04 +09004683rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
David Ahern9766e972015-01-29 20:59:33 -07004684 struct rocker_desc_info *desc_info,
4685 void *priv)
4686{
4687 struct rocker_tlv *cmd_stats;
4688
4689 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4690 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4691 return -EMSGSIZE;
4692
4693 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4694 if (!cmd_stats)
4695 return -EMSGSIZE;
4696
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004697 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4698 rocker_port->pport))
David Ahern9766e972015-01-29 20:59:33 -07004699 return -EMSGSIZE;
4700
4701 rocker_tlv_nest_end(desc_info, cmd_stats);
4702
4703 return 0;
4704}
4705
4706static int
Simon Horman534ba6a2015-06-01 13:25:04 +09004707rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09004708 const struct rocker_desc_info *desc_info,
David Ahern9766e972015-01-29 20:59:33 -07004709 void *priv)
4710{
Simon Hormane5054642015-05-25 14:28:36 +09004711 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4712 const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4713 const struct rocker_tlv *pattr;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004714 u32 pport;
David Ahern9766e972015-01-29 20:59:33 -07004715 u64 *data = priv;
4716 int i;
4717
4718 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4719
4720 if (!attrs[ROCKER_TLV_CMD_INFO])
4721 return -EIO;
4722
4723 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4724 attrs[ROCKER_TLV_CMD_INFO]);
4725
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004726 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
David Ahern9766e972015-01-29 20:59:33 -07004727 return -EIO;
4728
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004729 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4730 if (pport != rocker_port->pport)
David Ahern9766e972015-01-29 20:59:33 -07004731 return -EIO;
4732
4733 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4734 pattr = stats_attrs[rocker_port_stats[i].type];
4735 if (!pattr)
4736 continue;
4737
4738 data[i] = rocker_tlv_get_u64(pattr);
4739 }
4740
4741 return 0;
4742}
4743
4744static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4745 void *priv)
4746{
Scott Feldman179f9a22015-06-12 21:35:46 -07004747 return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
David Ahern9766e972015-01-29 20:59:33 -07004748 rocker_cmd_get_port_stats_prep, NULL,
4749 rocker_cmd_get_port_stats_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07004750 priv);
David Ahern9766e972015-01-29 20:59:33 -07004751}
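
/* Command execution is split into a "prep" callback that marshals
 * request TLVs into a command-ring descriptor and an optional "proc"
 * callback that parses the completion; rocker_cmd_exec() owns the
 * descriptor life cycle and the wait.  A new command thus needs only
 * the two callbacks, e.g. (illustrative only):
 *
 *	err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
 *			      rocker_cmd_get_port_stats_prep, NULL,
 *			      rocker_cmd_get_port_stats_ethtool_proc,
 *			      data);
 */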
4752
4753static void rocker_port_get_stats(struct net_device *dev,
4754 struct ethtool_stats *stats, u64 *data)
4755{
4756 struct rocker_port *rocker_port = netdev_priv(dev);
4757
4758 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4759 int i;
4760
4761 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4762 data[i] = 0;
4763 }
David Ahern9766e972015-01-29 20:59:33 -07004764}
4765
4766static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4767{
4768 switch (sset) {
4769 case ETH_SS_STATS:
4770 return ROCKER_PORT_STATS_LEN;
4771 default:
4772 return -EOPNOTSUPP;
4773 }
4774}
4775
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004776static const struct ethtool_ops rocker_port_ethtool_ops = {
4777 .get_settings = rocker_port_get_settings,
4778 .set_settings = rocker_port_set_settings,
4779 .get_drvinfo = rocker_port_get_drvinfo,
4780 .get_link = ethtool_op_get_link,
David Ahern9766e972015-01-29 20:59:33 -07004781 .get_strings = rocker_port_get_strings,
4782 .get_ethtool_stats = rocker_port_get_stats,
4783 .get_sset_count = rocker_port_get_sset_count,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004784};
4785
4786/*****************
4787 * NAPI interface
4788 *****************/
4789
4790static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4791{
4792 return container_of(napi, struct rocker_port, napi_tx);
4793}
4794
4795static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4796{
4797 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
Simon Hormane5054642015-05-25 14:28:36 +09004798 const struct rocker *rocker = rocker_port->rocker;
4799 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004800 u32 credits = 0;
4801 int err;
4802
4803 /* Cleanup tx descriptors */
4804 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
David Ahernf2bbca52015-01-16 14:22:29 -07004805 struct sk_buff *skb;
4806
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004807 err = rocker_desc_err(desc_info);
4808 if (err && net_ratelimit())
4809 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4810 err);
4811 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
David Ahernf2bbca52015-01-16 14:22:29 -07004812
4813 skb = rocker_desc_cookie_ptr_get(desc_info);
4814 if (err == 0) {
4815 rocker_port->dev->stats.tx_packets++;
4816 rocker_port->dev->stats.tx_bytes += skb->len;
Scott Feldman4725ceb2015-05-10 09:48:08 -07004817 } else {
David Ahernf2bbca52015-01-16 14:22:29 -07004818 rocker_port->dev->stats.tx_errors++;
Scott Feldman4725ceb2015-05-10 09:48:08 -07004819 }
David Ahernf2bbca52015-01-16 14:22:29 -07004820
4821 dev_kfree_skb_any(skb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004822 credits++;
4823 }
4824
4825 if (credits && netif_queue_stopped(rocker_port->dev))
4826 netif_wake_queue(rocker_port->dev);
4827
4828 napi_complete(napi);
4829 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4830
4831 return 0;
4832}
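
/* Tx completion reaps the whole ring tail in one pass, counts the
 * reaped descriptors as credits, and hands the credits back to the
 * hardware so the descriptors can be reused.  The queue wake here
 * pairs with the netif_stop_queue() in rocker_port_xmit().
 */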
4833
Simon Hormane5054642015-05-25 14:28:36 +09004834static int rocker_port_rx_proc(const struct rocker *rocker,
4835 const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004836 struct rocker_desc_info *desc_info)
4837{
Simon Hormane5054642015-05-25 14:28:36 +09004838 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004839 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4840 size_t rx_len;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07004841 u16 rx_flags = 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004842
4843 if (!skb)
4844 return -ENOENT;
4845
4846 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4847 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4848 return -EINVAL;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07004849 if (attrs[ROCKER_TLV_RX_FLAGS])
4850 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004851
4852 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4853
4854 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4855 skb_put(skb, rx_len);
4856 skb->protocol = eth_type_trans(skb, rocker_port->dev);
David Ahernf2bbca52015-01-16 14:22:29 -07004857
Scott Feldman3f98a8e2015-07-18 18:24:51 -07004858 if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
4859 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
4860
David Ahernf2bbca52015-01-16 14:22:29 -07004861 rocker_port->dev->stats.rx_packets++;
4862 rocker_port->dev->stats.rx_bytes += skb->len;
4863
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004864 netif_receive_skb(skb);
4865
Simon Horman534ba6a2015-06-01 13:25:04 +09004866 return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004867}
4868
4869static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4870{
4871 return container_of(napi, struct rocker_port, napi_rx);
4872}
4873
4874static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4875{
4876 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
Simon Hormane5054642015-05-25 14:28:36 +09004877 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004878 struct rocker_desc_info *desc_info;
4879 u32 credits = 0;
4880 int err;
4881
4882 /* Process rx descriptors */
4883 while (credits < budget &&
4884 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4885 err = rocker_desc_err(desc_info);
4886 if (err) {
4887 if (net_ratelimit())
4888 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4889 err);
4890 } else {
4891 err = rocker_port_rx_proc(rocker, rocker_port,
4892 desc_info);
4893 if (err && net_ratelimit())
4894 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4895 err);
4896 }
David Ahernf2bbca52015-01-16 14:22:29 -07004897 if (err)
4898 rocker_port->dev->stats.rx_errors++;
4899
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004900 rocker_desc_gen_clear(desc_info);
4901 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4902 credits++;
4903 }
4904
4905 if (credits < budget)
4906 napi_complete(napi);
4907
4908 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
4909
4910 return credits;
4911}
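
/* Standard NAPI budget handling: at most "budget" rx descriptors are
 * processed per poll, and napi_complete() is only called when the
 * ring ran dry before the budget did; returning credits == budget
 * leaves the instance scheduled for another pass.
 */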
4912
4913/*****************
4914 * PCI driver ops
4915 *****************/
4916
Simon Hormane5054642015-05-25 14:28:36 +09004917static void rocker_carrier_init(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004918{
Simon Hormane5054642015-05-25 14:28:36 +09004919 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004920 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4921 bool link_up;
4922
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004923 link_up = link_status & (1 << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004924 if (link_up)
4925 netif_carrier_on(rocker_port->dev);
4926 else
4927 netif_carrier_off(rocker_port->dev);
4928}
4929
Simon Hormane5054642015-05-25 14:28:36 +09004930static void rocker_remove_ports(const struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004931{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004932 struct rocker_port *rocker_port;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004933 int i;
4934
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004935 for (i = 0; i < rocker->port_count; i++) {
4936 rocker_port = rocker->ports[i];
Scott Feldmana0720312015-06-12 21:09:44 -07004937 if (!rocker_port)
4938 continue;
Scott Feldmanc4f20322015-05-10 09:47:50 -07004939 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
4940 ROCKER_OP_FLAG_REMOVE);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004941 unregister_netdev(rocker_port->dev);
Ido Schimmel1ebd47e2015-08-02 19:29:16 +02004942 free_netdev(rocker_port->dev);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004943 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004944 kfree(rocker->ports);
4945}
4946
Simon Horman534ba6a2015-06-01 13:25:04 +09004947static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004948{
Simon Horman534ba6a2015-06-01 13:25:04 +09004949 const struct rocker *rocker = rocker_port->rocker;
Simon Hormane5054642015-05-25 14:28:36 +09004950 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004951 int err;
4952
4953 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4954 rocker_port->dev->dev_addr);
4955 if (err) {
4956 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4957 eth_hw_addr_random(rocker_port->dev);
4958 }
4959}
4960
4961static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4962{
Simon Hormane5054642015-05-25 14:28:36 +09004963 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004964 struct rocker_port *rocker_port;
4965 struct net_device *dev;
Scott Feldmanbcfd7802015-06-01 11:39:04 -07004966 u16 untagged_vid = 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004967 int err;
4968
4969 dev = alloc_etherdev(sizeof(struct rocker_port));
4970 if (!dev)
4971 return -ENOMEM;
4972 rocker_port = netdev_priv(dev);
4973 rocker_port->dev = dev;
4974 rocker_port->rocker = rocker;
4975 rocker_port->port_number = port_number;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004976 rocker_port->pport = port_number + 1;
Scott Feldman5111f802014-11-28 14:34:30 +01004977 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
Scott Feldmanc4f20322015-05-10 09:47:50 -07004978 INIT_LIST_HEAD(&rocker_port->trans_mem);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004979
Simon Horman534ba6a2015-06-01 13:25:04 +09004980 rocker_port_dev_addr_init(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004981 dev->netdev_ops = &rocker_port_netdev_ops;
4982 dev->ethtool_ops = &rocker_port_ethtool_ops;
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07004983 dev->switchdev_ops = &rocker_port_switchdev_ops;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004984 netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
4985 NAPI_POLL_WEIGHT);
4986 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
4987 NAPI_POLL_WEIGHT);
4988 rocker_carrier_init(rocker_port);
4989
Ido Schimmel21518a62015-08-02 20:56:37 +02004990 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004991
4992 err = register_netdev(dev);
4993 if (err) {
4994 dev_err(&pdev->dev, "register_netdev failed\n");
4995 goto err_register_netdev;
4996 }
4997 rocker->ports[port_number] = rocker_port;
4998
Scott Feldman3f98a8e2015-07-18 18:24:51 -07004999 switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
5000
Scott Feldmanc4f20322015-05-10 09:47:50 -07005001 rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
Scott Feldman5111f802014-11-28 14:34:30 +01005002
Scott Feldmanc4f20322015-05-10 09:47:50 -07005003 err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005004 if (err) {
Scott Feldmanff147022015-08-03 22:31:18 -07005005 netdev_err(rocker_port->dev, "install ig port table failed\n");
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005006 goto err_port_ig_tbl;
5007 }
5008
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005009 rocker_port->internal_vlan_id =
5010 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
5011
5012 err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
5013 untagged_vid, 0);
5014 if (err) {
5015 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
5016 goto err_untagged_vlan;
5017 }
5018
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005019 return 0;
5020
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005021err_untagged_vlan:
5022 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
5023 ROCKER_OP_FLAG_REMOVE);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005024err_port_ig_tbl:
Scott Feldman6c4f7782015-08-03 22:31:17 -07005025 rocker->ports[port_number] = NULL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005026 unregister_netdev(dev);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005027err_register_netdev:
5028 free_netdev(dev);
5029 return err;
5030}
5031
5032static int rocker_probe_ports(struct rocker *rocker)
5033{
5034 int i;
5035 size_t alloc_size;
5036 int err;
5037
5038 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
Scott Feldman27b808c2015-06-01 11:39:02 -07005039 rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
Dan Carpentere65ad3b2015-02-25 16:35:32 +03005040 if (!rocker->ports)
5041 return -ENOMEM;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005042 for (i = 0; i < rocker->port_count; i++) {
5043 err = rocker_probe_port(rocker, i);
5044 if (err)
5045 goto remove_ports;
5046 }
5047 return 0;
5048
5049remove_ports:
5050 rocker_remove_ports(rocker);
5051 return err;
5052}
5053
5054static int rocker_msix_init(struct rocker *rocker)
5055{
5056 struct pci_dev *pdev = rocker->pdev;
5057 int msix_entries;
5058 int i;
5059 int err;
5060
5061 msix_entries = pci_msix_vec_count(pdev);
5062 if (msix_entries < 0)
5063 return msix_entries;
5064
5065 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
5066 return -EINVAL;
5067
5068 rocker->msix_entries = kmalloc_array(msix_entries,
5069 sizeof(struct msix_entry),
5070 GFP_KERNEL);
5071 if (!rocker->msix_entries)
5072 return -ENOMEM;
5073
5074 for (i = 0; i < msix_entries; i++)
5075 rocker->msix_entries[i].entry = i;
5076
5077 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
5078 if (err < 0)
5079 goto err_enable_msix;
5080
5081 return 0;
5082
5083err_enable_msix:
5084 kfree(rocker->msix_entries);
5085 return err;
5086}
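
/* The device exposes a fixed MSI-X layout: one vector each for the
 * command and event rings plus a tx and an rx vector per port.
 * Anything other than ROCKER_MSIX_VEC_COUNT(port_count) vectors is
 * therefore rejected, and pci_enable_msix_exact() is used since a
 * degraded vector count would be unusable.
 */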
5087
Simon Hormane5054642015-05-25 14:28:36 +09005088static void rocker_msix_fini(const struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005089{
5090 pci_disable_msix(rocker->pdev);
5091 kfree(rocker->msix_entries);
5092}
5093
5094static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5095{
5096 struct rocker *rocker;
5097 int err;
5098
5099 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
5100 if (!rocker)
5101 return -ENOMEM;
5102
5103 err = pci_enable_device(pdev);
5104 if (err) {
5105 dev_err(&pdev->dev, "pci_enable_device failed\n");
5106 goto err_pci_enable_device;
5107 }
5108
5109 err = pci_request_regions(pdev, rocker_driver_name);
5110 if (err) {
5111 dev_err(&pdev->dev, "pci_request_regions failed\n");
5112 goto err_pci_request_regions;
5113 }
5114
5115 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5116 if (!err) {
5117 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5118 if (err) {
5119 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
5120 goto err_pci_set_dma_mask;
5121 }
5122 } else {
5123 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5124 if (err) {
5125 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
5126 goto err_pci_set_dma_mask;
5127 }
5128 }
5129
5130 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
5131 dev_err(&pdev->dev, "invalid PCI region size\n");
Wei Yongjun3122a922015-04-16 20:21:02 +08005132 err = -EINVAL;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005133 goto err_pci_resource_len_check;
5134 }
5135
5136 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
5137 pci_resource_len(pdev, 0));
5138 if (!rocker->hw_addr) {
5139 dev_err(&pdev->dev, "ioremap failed\n");
5140 err = -EIO;
5141 goto err_ioremap;
5142 }
5143 pci_set_master(pdev);
5144
5145 rocker->pdev = pdev;
5146 pci_set_drvdata(pdev, rocker);
5147
5148 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
5149
5150 err = rocker_msix_init(rocker);
5151 if (err) {
5152 dev_err(&pdev->dev, "MSI-X init failed\n");
5153 goto err_msix_init;
5154 }
5155
5156 err = rocker_basic_hw_test(rocker);
5157 if (err) {
5158 dev_err(&pdev->dev, "basic hw test failed\n");
5159 goto err_basic_hw_test;
5160 }
5161
5162 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5163
5164 err = rocker_dma_rings_init(rocker);
5165 if (err)
5166 goto err_dma_rings_init;
5167
5168 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
5169 rocker_cmd_irq_handler, 0,
5170 rocker_driver_name, rocker);
5171 if (err) {
5172 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5173 goto err_request_cmd_irq;
5174 }
5175
5176 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5177 rocker_event_irq_handler, 0,
5178 rocker_driver_name, rocker);
5179 if (err) {
5180 dev_err(&pdev->dev, "cannot assign event irq\n");
5181 goto err_request_event_irq;
5182 }
5183
5184 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5185
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005186 err = rocker_init_tbls(rocker);
5187 if (err) {
5188 dev_err(&pdev->dev, "cannot init rocker tables\n");
5189 goto err_init_tbls;
5190 }
5191
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005192 err = rocker_probe_ports(rocker);
5193 if (err) {
5194 dev_err(&pdev->dev, "failed to probe ports\n");
5195 goto err_probe_ports;
5196 }
5197
Scott Feldmanc8beb5b2015-08-12 18:44:13 -07005198 dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5199 (int)sizeof(rocker->hw.id), &rocker->hw.id);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005200
5201 return 0;
5202
5203err_probe_ports:
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005204 rocker_free_tbls(rocker);
5205err_init_tbls:
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005206 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5207err_request_event_irq:
5208 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5209err_request_cmd_irq:
5210 rocker_dma_rings_fini(rocker);
5211err_dma_rings_init:
5212err_basic_hw_test:
5213 rocker_msix_fini(rocker);
5214err_msix_init:
5215 iounmap(rocker->hw_addr);
5216err_ioremap:
5217err_pci_resource_len_check:
5218err_pci_set_dma_mask:
5219 pci_release_regions(pdev);
5220err_pci_request_regions:
5221 pci_disable_device(pdev);
5222err_pci_enable_device:
5223 kfree(rocker);
5224 return err;
5225}
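
/* rocker_probe() unwinds with the usual kernel goto ladder: each
 * failure jumps to a label that releases everything acquired so far,
 * in exactly the reverse order of acquisition.  Labels with nothing
 * of their own to undo (err_basic_hw_test,
 * err_pci_resource_len_check) simply fall through to the next
 * unwind step.
 */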
5226
5227static void rocker_remove(struct pci_dev *pdev)
5228{
5229 struct rocker *rocker = pci_get_drvdata(pdev);
5230
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005231 rocker_free_tbls(rocker);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005232 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5233 rocker_remove_ports(rocker);
5234 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5235 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5236 rocker_dma_rings_fini(rocker);
5237 rocker_msix_fini(rocker);
5238 iounmap(rocker->hw_addr);
5239 pci_release_regions(rocker->pdev);
5240 pci_disable_device(rocker->pdev);
5241 kfree(rocker);
5242}
5243
5244static struct pci_driver rocker_pci_driver = {
5245 .name = rocker_driver_name,
5246 .id_table = rocker_pci_id_table,
5247 .probe = rocker_probe,
5248 .remove = rocker_remove,
5249};
5250
Scott Feldman6c707942014-11-28 14:34:28 +01005251/************************************
5252 * Net device notifier event handler
5253 ************************************/
5254
Simon Hormane5054642015-05-25 14:28:36 +09005255static bool rocker_port_dev_check(const struct net_device *dev)
Scott Feldman6c707942014-11-28 14:34:28 +01005256{
5257 return dev->netdev_ops == &rocker_port_netdev_ops;
5258}
5259
5260static int rocker_port_bridge_join(struct rocker_port *rocker_port,
5261 struct net_device *bridge)
5262{
Scott Feldman027e00d2015-06-01 11:39:05 -07005263 u16 untagged_vid = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01005264 int err;
5265
Scott Feldman027e00d2015-06-01 11:39:05 -07005266 /* Port is joining bridge, so the internal VLAN for the
5267 * port is going to change to the bridge internal VLAN.
5268 * Let's remove untagged VLAN (vid=0) from port and
5269 * re-add once internal VLAN has changed.
5270 */
5271
5272 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5273 if (err)
5274 return err;
5275
Simon Hormandf6a2062015-05-21 12:40:17 +09005276 rocker_port_internal_vlan_id_put(rocker_port,
Scott Feldman6c707942014-11-28 14:34:28 +01005277 rocker_port->dev->ifindex);
Scott Feldman027e00d2015-06-01 11:39:05 -07005278 rocker_port->internal_vlan_id =
5279 rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
Scott Feldman6c707942014-11-28 14:34:28 +01005280
5281 rocker_port->bridge_dev = bridge;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005282 switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
Scott Feldman6c707942014-11-28 14:34:28 +01005283
Scott Feldman027e00d2015-06-01 11:39:05 -07005284 return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
5285 untagged_vid, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01005286}
5287
5288static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
5289{
Scott Feldman027e00d2015-06-01 11:39:05 -07005290 u16 untagged_vid = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01005291 int err;
5292
Scott Feldman027e00d2015-06-01 11:39:05 -07005293 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5294 if (err)
5295 return err;
5296
Simon Hormandf6a2062015-05-21 12:40:17 +09005297 rocker_port_internal_vlan_id_put(rocker_port,
Scott Feldman6c707942014-11-28 14:34:28 +01005298 rocker_port->bridge_dev->ifindex);
Scott Feldman6c707942014-11-28 14:34:28 +01005299 rocker_port->internal_vlan_id =
5300 rocker_port_internal_vlan_id_get(rocker_port,
5301 rocker_port->dev->ifindex);
Scott Feldman027e00d2015-06-01 11:39:05 -07005302
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005303 switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
5304 false);
Scott Feldman027e00d2015-06-01 11:39:05 -07005305 rocker_port->bridge_dev = NULL;
5306
5307 err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
5308 untagged_vid, 0);
Scott Feldmane47172a2015-02-25 20:15:38 -08005309 if (err)
5310 return err;
5311
5312 if (rocker_port->dev->flags & IFF_UP)
Scott Feldman179f9a22015-06-12 21:35:46 -07005313 err = rocker_port_fwd_enable(rocker_port,
5314 SWITCHDEV_TRANS_NONE, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01005315
5316 return err;
5317}
5318
5320static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5321 struct net_device *master)
5322{
5323 int err;
5324
5325 rocker_port->bridge_dev = master;
5326
5327 err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
5328 if (err)
5329 return err;
5330 err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
5331
5332 return err;
5333}
5334
Jiri Pirko686ed302015-08-27 09:31:23 +02005335static int rocker_port_master_linked(struct rocker_port *rocker_port,
5336 struct net_device *master)
Scott Feldman6c707942014-11-28 14:34:28 +01005337{
Scott Feldman6c707942014-11-28 14:34:28 +01005338 int err = 0;
5339
Jiri Pirko686ed302015-08-27 09:31:23 +02005340 if (netif_is_bridge_master(master))
5341 err = rocker_port_bridge_join(rocker_port, master);
5342 else if (netif_is_ovs_master(master))
5343 err = rocker_port_ovs_changed(rocker_port, master);
5344 return err;
5345}
Scott Feldman6c707942014-11-28 14:34:28 +01005346
Jiri Pirko686ed302015-08-27 09:31:23 +02005347static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
5348{
5349 int err = 0;
5350
5351 if (rocker_port_is_bridged(rocker_port))
5352 err = rocker_port_bridge_leave(rocker_port);
5353 else if (rocker_port_is_ovsed(rocker_port))
5354 err = rocker_port_ovs_changed(rocker_port, NULL);
Scott Feldman6c707942014-11-28 14:34:28 +01005355 return err;
5356}
5357
5358static int rocker_netdevice_event(struct notifier_block *unused,
5359 unsigned long event, void *ptr)
5360{
Jiri Pirko686ed302015-08-27 09:31:23 +02005361 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5362 struct netdev_notifier_changeupper_info *info;
5363 struct rocker_port *rocker_port;
Scott Feldman6c707942014-11-28 14:34:28 +01005364 int err;
5365
Jiri Pirko686ed302015-08-27 09:31:23 +02005366 if (!rocker_port_dev_check(dev))
5367 return NOTIFY_DONE;
5368
Scott Feldman6c707942014-11-28 14:34:28 +01005369 switch (event) {
5370 case NETDEV_CHANGEUPPER:
Jiri Pirko686ed302015-08-27 09:31:23 +02005371 info = ptr;
5372 if (!info->master)
5373 goto out;
5374 rocker_port = netdev_priv(dev);
5375 if (info->linking) {
5376 err = rocker_port_master_linked(rocker_port,
5377 info->upper_dev);
5378 if (err)
5379 netdev_warn(dev, "failed to reflect master linked (err %d)\n",
5380 err);
5381 } else {
5382 err = rocker_port_master_unlinked(rocker_port);
5383 if (err)
5384 netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
5385 err);
5386 }
Scott Feldman6c707942014-11-28 14:34:28 +01005387 break;
5388 }
Jiri Pirko686ed302015-08-27 09:31:23 +02005389out:
Scott Feldman6c707942014-11-28 14:34:28 +01005390 return NOTIFY_DONE;
5391}
5392
5393static struct notifier_block rocker_netdevice_nb __read_mostly = {
5394 .notifier_call = rocker_netdevice_event,
5395};
5396
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005397/************************************
5398 * Net event notifier event handler
5399 ************************************/
5400
5401static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5402{
5403 struct rocker_port *rocker_port = netdev_priv(dev);
Scott Feldman02a9fbf2015-06-12 21:35:47 -07005404 int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5405 ROCKER_OP_FLAG_NOWAIT;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005406 __be32 ip_addr = *(__be32 *)n->primary_key;
5407
Scott Feldmanc4f20322015-05-10 09:47:50 -07005408 return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
5409 flags, ip_addr, n->ha);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005410}
5411
5412static int rocker_netevent_event(struct notifier_block *unused,
5413 unsigned long event, void *ptr)
5414{
5415 struct net_device *dev;
5416 struct neighbour *n = ptr;
5417 int err;
5418
5419 switch (event) {
5420 case NETEVENT_NEIGH_UPDATE:
5421 if (n->tbl != &arp_tbl)
5422 return NOTIFY_DONE;
5423 dev = n->dev;
5424 if (!rocker_port_dev_check(dev))
5425 return NOTIFY_DONE;
5426 err = rocker_neigh_update(dev, n);
5427 if (err)
5428 netdev_warn(dev,
5429 "failed to handle neigh update (err %d)\n",
5430 err);
5431 break;
5432 }
5433
5434 return NOTIFY_DONE;
5435}
5436
5437static struct notifier_block rocker_netevent_nb __read_mostly = {
5438 .notifier_call = rocker_netevent_event,
5439};
5440
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005441/***********************
5442 * Module init and exit
5443 ***********************/
5444
5445static int __init rocker_module_init(void)
5446{
Scott Feldman6c707942014-11-28 14:34:28 +01005447 int err;
5448
5449 register_netdevice_notifier(&rocker_netdevice_nb);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005450 register_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005451 err = pci_register_driver(&rocker_pci_driver);
5452 if (err)
5453 goto err_pci_register_driver;
5454 return 0;
5455
5456err_pci_register_driver:
Gilad Ben-Yossefa076e6b2015-06-23 10:52:10 +03005457 unregister_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005458 unregister_netdevice_notifier(&rocker_netdevice_nb);
5459 return err;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005460}
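
/* The notifiers are registered before the PCI driver so enslavement
 * and neighbour events that fire while ports are still being probed
 * are not missed; if pci_register_driver() fails, both notifiers are
 * unregistered again.
 */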
5461
5462static void __exit rocker_module_exit(void)
5463{
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005464 unregister_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005465 unregister_netdevice_notifier(&rocker_netdevice_nb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005466 pci_unregister_driver(&rocker_pci_driver);
5467}
5468
5469module_init(rocker_module_init);
5470module_exit(rocker_module_exit);
5471
5472MODULE_LICENSE("GPL v2");
5473MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5474MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5475MODULE_DESCRIPTION("Rocker switch device driver");
5476MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);