/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/hashtable.h>
#include <linux/crc32.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker.h"

static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};

struct rocker_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
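
/* Illustrative sketch, not taken from the driver: a caller fills only the
 * union member that matches tbl_id, e.g. for the ingress-port table:
 *
 *	struct rocker_flow_tbl_key key = {};
 *
 *	key.priority = ROCKER_PRIORITY_IG_PORT;
 *	key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
 *	key.ig_port.in_pport = rocker_port->pport;
 *	key.ig_port.in_pport_mask = 0xffffffff;
 *	key.ig_port.goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
 *
 * The table-id enum values are assumed to come from rocker.h; the masks
 * and defaults the real flow-add helpers use may differ.
 */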

struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct rocker_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
};

struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};

struct rocker_desc_info {
	char *data; /* mapped */
	size_t data_size;
	size_t tlv_size;
	struct rocker_desc *desc;
	dma_addr_t mapaddr;
};

struct rocker_dma_ring_info {
	size_t size;
	u32 head;
	u32 tail;
	struct rocker_desc *desc; /* mapped */
	dma_addr_t mapaddr;
	struct rocker_desc_info *desc_info;
	unsigned int type;
};

struct rocker;

enum {
	ROCKER_CTRL_LINK_LOCAL_MCAST,
	ROCKER_CTRL_LOCAL_ARP,
	ROCKER_CTRL_IPV4_MCAST,
	ROCKER_CTRL_IPV6_MCAST,
	ROCKER_CTRL_DFLT_BRIDGING,
	ROCKER_CTRL_DFLT_OVS,
	ROCKER_CTRL_MAX,
};

#define ROCKER_INTERNAL_VLAN_ID_BASE	0x0f00
#define ROCKER_N_INTERNAL_VLANS		255
#define ROCKER_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)

struct rocker_port {
	struct net_device *dev;
	struct net_device *bridge_dev;
	struct rocker *rocker;
	unsigned int port_number;
	u32 pport;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[ROCKER_CTRL_MAX];
	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
	struct napi_struct napi_tx;
	struct napi_struct napi_rx;
	struct rocker_dma_ring_info tx_ring;
	struct rocker_dma_ring_info rx_ring;
	struct list_head trans_mem;
};

struct rocker {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct msix_entry *msix_entries;
	unsigned int port_count;
	struct rocker_port **ports;
	struct {
		u64 id;
	} hw;
	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
	struct rocker_dma_ring_info cmd_ring;
	struct rocker_dma_ring_info event_ring;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
};

static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
 */

enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};

static bool rocker_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}
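
/* With ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00 and ROCKER_N_INTERNAL_VLANS 255,
 * internal VLAN IDs occupy [0x0f00, 0x0ffe] in host byte order; the 0xffe
 * bound above is the same value 0x0ffe written without its leading zero.
 */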

static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
				      u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = rocker_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
				   __be16 vlan_id)
{
	if (rocker_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_bridge_master(rocker_port->bridge_dev);
}

static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
{
	return rocker_port->bridge_dev &&
	       netif_is_ovs_master(rocker_port->bridge_dev);
}

#define ROCKER_OP_FLAG_REMOVE		BIT(0)
#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
#define ROCKER_OP_FLAG_LEARNED		BIT(2)
#define ROCKER_OP_FLAG_REFRESH		BIT(3)

static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     size_t size)
{
	struct list_head *elem = NULL;
	gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
			  GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a per-port list.  If in transaction
	 * commit phase, dequeue the memory from the per-port list
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (elem)
			INIT_LIST_HEAD(elem);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		list_add_tail(elem, &rocker_port->trans_mem);
	} else {
		BUG_ON(list_empty(&rocker_port->trans_mem));
		elem = rocker_port->trans_mem.next;
		list_del_init(elem);
	}

	return elem ? elem + 1 : NULL;
}

static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
}

static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 size_t n, size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
}

static void rocker_port_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct list_head *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
	 */

	if (switchdev_trans_ph_prepare(trans))
		return;

	elem = (struct list_head *)mem - 1;
	BUG_ON(!list_empty(elem));
	kfree(elem);
}
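
/* Illustrative sketch, not part of the driver: a switchdev object add runs
 * through identical code in the prepare and commit phases, e.g.:
 *
 *	struct rocker_fdb_tbl_entry *fdb;
 *
 *	fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
 *	if (!fdb)
 *		return -ENOMEM;
 *	... fill in *fdb, try to install it ...
 *	if (err)
 *		rocker_port_kfree(trans, fdb);
 *
 * In the prepare phase the kzalloc really allocates (and the kfree is a
 * no-op); in the commit phase the same memory is dequeued from
 * rocker_port->trans_mem, so commit cannot fail with -ENOMEM.
 */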

struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
					      struct switchdev_trans *trans,
					      int flags)
{
	struct rocker_wait *wait;

	wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
	if (!wait)
		return NULL;
	rocker_wait_init(wait);
	return wait;
}

static void rocker_wait_destroy(struct switchdev_trans *trans,
				struct rocker_wait *wait)
{
	rocker_port_kfree(trans, wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	if (!wait->done)
		return false;
	return true;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}

static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
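
/* The ROCKER_ ## reg token pasting means, for example, that
 * rocker_write32(rocker, TEST_IRQ, val) expands to
 * writel(val, rocker->hw_addr + ROCKER_TEST_IRQ).
 */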

/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}

static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
{
	const struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96

static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

unmap:
	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 PCI_DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}

static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}

/******
 * TLV
 ******/

#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))

/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 * |            Header           | Pad |           Payload           | Pad |
 * |      (struct rocker_tlv)    | ing |                             | ing |
 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
 *  <--------------------------- tlv->len -------------------------->
 */

static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
					  int *remaining)
{
	int totlen = ROCKER_TLV_ALIGN(tlv->len);

	*remaining -= totlen;
	return (struct rocker_tlv *) ((char *) tlv + totlen);
}

static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
{
	return remaining >= (int) ROCKER_TLV_HDRLEN &&
	       tlv->len >= ROCKER_TLV_HDRLEN &&
	       tlv->len <= remaining;
}

#define rocker_tlv_for_each(pos, head, len, rem)	\
	for (pos = head, rem = len;			\
	     rocker_tlv_ok(pos, rem);			\
	     pos = rocker_tlv_next(pos, &(rem)))

#define rocker_tlv_for_each_nested(pos, tlv, rem)	\
	rocker_tlv_for_each(pos, rocker_tlv_data(tlv),	\
			    rocker_tlv_len(tlv), rem)

static int rocker_tlv_attr_size(int payload)
{
	return ROCKER_TLV_HDRLEN + payload;
}

static int rocker_tlv_total_size(int payload)
{
	return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
}

static int rocker_tlv_padlen(int payload)
{
	return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
}
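
/* Worked example, assuming ROCKER_TLV_HDRLEN is 8: a 6-byte payload
 * (ETH_ALEN) gives rocker_tlv_attr_size() == 14, rocker_tlv_total_size()
 * == 16, and rocker_tlv_padlen() == 2 bytes of trailing padding.
 */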

static int rocker_tlv_type(const struct rocker_tlv *tlv)
{
	return tlv->type;
}

static void *rocker_tlv_data(const struct rocker_tlv *tlv)
{
	return (char *) tlv + ROCKER_TLV_HDRLEN;
}

static int rocker_tlv_len(const struct rocker_tlv *tlv)
{
	return tlv->len - ROCKER_TLV_HDRLEN;
}

static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
{
	return *(u8 *) rocker_tlv_data(tlv);
}

static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
{
	return *(u16 *) rocker_tlv_data(tlv);
}

static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
{
	return *(__be16 *) rocker_tlv_data(tlv);
}

static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
{
	return *(u32 *) rocker_tlv_data(tlv);
}

static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
{
	return *(u64 *) rocker_tlv_data(tlv);
}

static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
			     const char *buf, int buf_len)
{
	const struct rocker_tlv *tlv;
	const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
	int rem;

	memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));

	rocker_tlv_for_each(tlv, head, buf_len, rem) {
		u32 type = rocker_tlv_type(tlv);

		if (type > 0 && type <= maxtype)
			tb[type] = tlv;
	}
}

static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
				    const struct rocker_tlv *tlv)
{
	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
			 rocker_tlv_len(tlv));
}

static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
				  const struct rocker_desc_info *desc_info)
{
	rocker_tlv_parse(tb, maxtype, desc_info->data,
			 desc_info->desc->tlv_size);
}

static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
{
	return (struct rocker_tlv *) ((char *) desc_info->data +
				      desc_info->tlv_size);
}

static int rocker_tlv_put(struct rocker_desc_info *desc_info,
			  int attrtype, int attrlen, const void *data)
{
	int tail_room = desc_info->data_size - desc_info->tlv_size;
	int total_size = rocker_tlv_total_size(attrlen);
	struct rocker_tlv *tlv;

	if (unlikely(tail_room < total_size))
		return -EMSGSIZE;

	tlv = rocker_tlv_start(desc_info);
	desc_info->tlv_size += total_size;
	tlv->type = attrtype;
	tlv->len = rocker_tlv_attr_size(attrlen);
	memcpy(rocker_tlv_data(tlv), data, attrlen);
	memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
	return 0;
}

static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
			     int attrtype, u8 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
}

static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
			      int attrtype, u16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
}

static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
			       int attrtype, __be16 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
}

static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
			      int attrtype, u32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
}

static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
			       int attrtype, __be32 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
}

static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
			      int attrtype, u64 value)
{
	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
}

static struct rocker_tlv *
rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
{
	struct rocker_tlv *start = rocker_tlv_start(desc_info);

	if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
		return NULL;

	return start;
}

static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
				struct rocker_tlv *start)
{
	start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
}

static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
				   const struct rocker_tlv *start)
{
	desc_info->tlv_size = (const char *) start - desc_info->data;
}
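
/* Illustrative sketch, mirroring the netlink nesting API these helpers are
 * patterned after: a nested attribute would be emitted as
 *
 *	struct rocker_tlv *nest;
 *
 *	nest = rocker_tlv_nest_start(desc_info, SOME_NEST_TYPE);
 *	if (!nest)
 *		return -EMSGSIZE;
 *	if (rocker_tlv_put_u32(desc_info, SOME_MEMBER_TYPE, val)) {
 *		rocker_tlv_nest_cancel(desc_info, nest);
 *		return -EMSGSIZE;
 *	}
 *	rocker_tlv_nest_end(desc_info, nest);
 *
 * where SOME_NEST_TYPE/SOME_MEMBER_TYPE stand in for real attribute types
 * from rocker.h; nest_end() back-patches the nest length and nest_cancel()
 * rolls tlv_size back so a partially built nest is dropped.
 */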

/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/

static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}

static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}

static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}
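
/* Together these implement the ring protocol: the driver owns descriptors
 * from tail up to (but excluding) head, hardware completes them in order
 * and sets the GEN bit in comp_err, and rocker_desc_tail_get() only
 * consumes a descriptor once that bit is seen.
 */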

static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}

static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}
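
/* Note only info->size - 1 descriptors are handed to the producer: keeping
 * one slot unused is what lets head == tail mean "empty" while
 * __pos_inc(head) == tail means "full" in rocker_desc_head_get().
 */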

static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}

static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}

static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
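
/* i.e. one rx buffer must hold the largest frame the netdev can receive:
 * MTU bytes of payload plus the Ethernet header, FCS and a possible
 * VLAN tag.
 */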

static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}

static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
					 const struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}

static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}

static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}

static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}

static void rocker_port_set_enable(const struct rocker_port *rocker_port,
				   bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}
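
/* PORT_PHYS_ENABLE is treated here as a bitmask of physical ports; since
 * pport numbering appears to start at 1 (the event handlers below subtract
 * 1 to get a port index), bit 0 of the register goes unused.
 */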

/********************************
 * Interrupt handler and helpers
 ********************************/

static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
			rocker_wait_destroy(NULL, wait);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}

static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}
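
/* pports are 1-based in the TLVs from the device, hence the "- 1" when
 * converting to an index into rocker->ports[] here and in
 * rocker_event_mac_vlan_seen() below.
 */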

static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags);

static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}

static int rocker_event_process(const struct rocker *rocker,
				const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	const struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}
1523
static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}

/********************
 * Command interface
 ********************/

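/* Commands travel as descriptors on the cmd ring. A "prep" callback fills
 * the descriptor with request TLVs; an optional "proc" callback parses the
 * completed descriptor's response TLVs.
 */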
typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv);

typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
				    const struct rocker_desc_info *desc_info,
				    void *priv);

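/* rocker_cmd_exec() drives one command through the cmd ring: allocate a
 * wait object, reserve a descriptor, let @prepare encode the request,
 * post the descriptor (skipped in the switchdev prepare phase so the
 * transaction can still abort cleanly), then, unless ROCKER_OP_FLAG_NOWAIT
 * is set, wait up to HZ/10 for the completion IRQ and hand the response to
 * @process. Typical use, as in rocker_cmd_get_port_settings_macaddr()
 * below:
 *
 *	err = rocker_cmd_exec(rocker_port, NULL, 0,
 *			      rocker_cmd_get_port_settings_prep, NULL,
 *			      rocker_cmd_get_port_settings_macaddr_proc,
 *			      macaddr);
 */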
static int rocker_cmd_exec(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans, int flags,
			   rocker_cmd_prep_cb_t prepare, void *prepare_priv,
			   rocker_cmd_proc_cb_t process, void *process_priv)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
	unsigned long lock_flags;
	int err;

	wait = rocker_wait_create(rocker_port, trans, flags);
	if (!wait)
		return -ENOMEM;
	wait->nowait = nowait;

	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		err = -EAGAIN;
		goto out;
	}

	err = prepare(rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		goto out;
	}

	rocker_desc_cookie_ptr_set(desc_info, wait);

	if (!switchdev_trans_ph_prepare(trans))
		rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	if (nowait)
		return 0;

	if (!switchdev_trans_ph_prepare(trans))
		if (!rocker_wait_event_timeout(wait, HZ / 10))
			return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		return err;

	if (process)
		err = process(rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
out:
	rocker_wait_destroy(trans, wait);
	return err;
}

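/* GET_PORT_SETTINGS request: a CMD_TYPE TLV plus a nested CMD_INFO TLV
 * identifying the physical port (pport). The proc callbacks that follow
 * each pick the one response attribute they care about.
 */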
static int
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_TP;
	ecmd->phy_address = 0xff;
	ecmd->port = PORT_TP;
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}

static int
rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	if (!attr)
		return -EIO;

	if (rocker_tlv_len(attr) != ETH_ALEN)
		return -EINVAL;

	ether_addr_copy(macaddr, rocker_tlv_data(attr));
	return 0;
}

struct port_name {
	char *buf;
	size_t len;
};

static int
rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
					    const struct rocker_desc_info *desc_info,
					    void *priv)
{
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct port_name *name = priv;
	const struct rocker_tlv *attr;
	size_t i, j, len;
	const char *str;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
	if (!attr)
		return -EIO;

	len = min_t(size_t, rocker_tlv_len(attr), name->len);
	str = rocker_tlv_data(attr);

	/* make sure name only contains alphanumeric characters */
	for (i = j = 0; i < len; ++i) {
		if (isalnum(str[i])) {
			name->buf[j] = str[i];
			j++;
		}
	}

	if (j == 0)
		return -EIO;

	name->buf[j] = '\0';

	return 0;
}

static int
rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
			       ethtool_cmd_speed(ecmd)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
			      ecmd->duplex))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
			      ecmd->autoneg))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	const unsigned char *macaddr = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
			   ETH_ALEN, macaddr))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      void *priv)
{
	int mtu = *(int *)priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
			       mtu))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
			      !!(rocker_port->brport_flags & BR_LEARNING)))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

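/* Simple command wrappers. These run outside of any switchdev transaction
 * (trans == NULL, flags == 0), so they post to the hardware immediately
 * and wait for completion.
 */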
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_ethtool_proc,
			       ecmd);
}

static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_macaddr_proc,
			       macaddr);
}

static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_ethtool_prep,
			       ecmd, NULL, NULL);
}

static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_macaddr_prep,
			       macaddr, NULL, NULL);
}

static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
					    int mtu)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_mtu_prep,
			       &mtu, NULL, NULL);
}

static int rocker_port_set_learning(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans)
{
	return rocker_cmd_exec(rocker_port, trans, 0,
			       rocker_cmd_set_port_learning_prep,
			       NULL, NULL, NULL);
}

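/* Per-table TLV encoders for OF-DPA flow-table entries. Each encoder emits
 * only the match/action attributes meaningful for its table;
 * rocker_cmd_flow_tbl_add() below selects the encoder from the table ID.
 */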
static int
rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
				const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			     const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				 const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				      const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			       const struct rocker_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			    const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}

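/* Common flow-table command header: table ID, priority, hard-timeout
 * (always 0 here) and the cookie that identifies the entry for later
 * MOD/DEL, followed by the table-specific match encoded by one of the
 * helpers above.
 */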
static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int
rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
				      struct rocker_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
			       ROCKER_GROUP_PORT_GET(entry->group_id)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
			      entry->l2_interface.pop_vlan))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}

static int
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				   const struct rocker_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}

static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}

static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	const struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}

/***************************************************
 * Flow, group, FDB, internal VLAN and neigh tables
 ***************************************************/

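/* These five tables are hashtable shadows of state pushed to the device;
 * shadowing lets an add detect an existing entry and become a modify.
 * Each table gets its own spinlock, taken with irqsave since the FDB is
 * also updated from the event IRQ path.
 */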
static int rocker_init_tbls(struct rocker *rocker)
{
	hash_init(rocker->flow_tbl);
	spin_lock_init(&rocker->flow_tbl_lock);

	hash_init(rocker->group_tbl);
	spin_lock_init(&rocker->group_tbl_lock);

	hash_init(rocker->fdb_tbl);
	spin_lock_init(&rocker->fdb_tbl_lock);

	hash_init(rocker->internal_vlan_tbl);
	spin_lock_init(&rocker->internal_vlan_tbl_lock);

	hash_init(rocker->neigh_tbl);
	spin_lock_init(&rocker->neigh_tbl_lock);

	return 0;
}

static void rocker_free_tbls(struct rocker *rocker)
{
	unsigned long flags;
	struct rocker_flow_tbl_entry *flow_entry;
	struct rocker_group_tbl_entry *group_entry;
	struct rocker_fdb_tbl_entry *fdb_entry;
	struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
	struct rocker_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
	hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
	hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);

	spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
	hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
}

static struct rocker_flow_tbl_entry *
rocker_flow_tbl_find(const struct rocker *rocker,
		     const struct rocker_flow_tbl_entry *match)
{
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(rocker->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}

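/* Add-or-modify: if an entry with the same key already exists, steal its
 * cookie and issue a FLOW_MOD, otherwise take a fresh cookie and issue a
 * FLOW_ADD. Hashtable updates are skipped in the switchdev prepare phase,
 * where rocker_cmd_exec() likewise refrains from touching the hardware.
 */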
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_flow_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		match->cookie = found->cookie;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		rocker_port_kfree(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = rocker->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

	return rocker_cmd_exec(rocker_port, trans, flags,
			       rocker_cmd_flow_tbl_add, found, NULL, NULL);
}

static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_flow_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

	found = rocker_flow_tbl_find(rocker, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

	rocker_port_kfree(trans, match);

	if (found) {
		err = rocker_cmd_exec(rocker_port, trans, flags,
				      rocker_cmd_flow_tbl_del,
				      found, NULL, NULL);
		rocker_port_kfree(trans, found);
	}

	return err;
}

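/* Dispatch helper: the rocker_flow_tbl_* constructors below build an entry
 * and hand it here, where ROCKER_OP_FLAG_REMOVE selects del vs. add.
 */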
static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags,
			      struct rocker_flow_tbl_entry *entry)
{
	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
	else
		return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags,
				   u32 in_pport, u32 in_pport_mask,
				   enum rocker_of_dpa_table_id goto_tbl)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = ROCKER_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				u32 in_pport, __be16 vlan_id,
				__be16 vlan_id_mask,
				enum rocker_of_dpa_table_id goto_tbl,
				bool untagged, __be16 new_vlan_id)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = ROCKER_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;

	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans,
				    u32 in_pport, u32 in_pport_mask,
				    __be16 eth_type, const u8 *eth_dst,
				    const u8 *eth_dst_mask, __be16 vlan_id,
				    __be16 vlan_id_mask, bool copy_to_cpu,
				    int flags)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

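/* Bridging entries come in six flavors depending on whether the bridge is
 * VLAN-based or tunnel (tenant)-based, whether the entry is a default
 * (catch-all) and whether the MAC mask is wildcarded; each combination
 * maps to its own fixed lookup priority.
 */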
static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  const u8 *eth_dst, const u8 *eth_dst_mask,
				  __be16 vlan_id, u32 tunnel_id,
				  enum rocker_of_dpa_table_id goto_tbl,
				  u32 group_id, bool copy_to_cpu)
{
	struct rocker_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = ROCKER_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = ROCKER_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = ROCKER_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
					  struct switchdev_trans *trans,
					  __be16 eth_type, __be32 dst,
					  __be32 dst_mask, u32 priority,
					  enum rocker_of_dpa_table_id goto_tbl,
					  u32 group_id, int flags)
{
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct rocker_flow_tbl_key,
				  ucast_routing.group_id);

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

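/* ACL entries normally use ROCKER_PRIORITY_ACL_NORMAL. A DA mask equal to
 * mcast_mac marks a catch-all multicast match and drops to the lower
 * default priority, while link-local DAs (control traffic such as STP)
 * are raised to control priority.
 */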
static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       u32 in_pport, u32 in_pport_mask,
			       const u8 *eth_src, const u8 *eth_src_mask,
			       const u8 *eth_dst, const u8 *eth_dst_mask,
			       __be16 eth_type, __be16 vlan_id,
			       __be16 vlan_id_mask, u8 ip_proto,
			       u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			       u32 group_id)
{
	u32 priority;
	struct rocker_flow_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	priority = ROCKER_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = ROCKER_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = ROCKER_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}

static struct rocker_group_tbl_entry *
rocker_group_tbl_find(const struct rocker *rocker,
		      const struct rocker_group_tbl_entry *match)
{
	struct rocker_group_tbl_entry *found;

	hash_for_each_possible(rocker->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}

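/* L2 flood and L2 multicast group entries carry a separately allocated
 * group_ids array; freeing such an entry must free the array as well.
 */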
Jiri Pirko76c6f942015-09-24 10:02:44 +02002743static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
Scott Feldmanc4f20322015-05-10 09:47:50 -07002744 struct rocker_group_tbl_entry *entry)
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002745{
2746 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2747 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2748 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
Jiri Pirko76c6f942015-09-24 10:02:44 +02002749 rocker_port_kfree(trans, entry->group_ids);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002750 break;
2751 default:
2752 break;
2753 }
Jiri Pirko76c6f942015-09-24 10:02:44 +02002754 rocker_port_kfree(trans, entry);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01002755}
2756
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		rocker_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(rocker->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	return rocker_cmd_exec(rocker_port, trans, flags,
			       rocker_cmd_group_tbl_add, found, NULL, NULL);
}

static int rocker_group_tbl_del(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, int flags,
				struct rocker_group_tbl_entry *match)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

	found = rocker_group_tbl_find(rocker, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

	rocker_group_tbl_entry_free(trans, match);

	if (found) {
		err = rocker_cmd_exec(rocker_port, trans, flags,
				      rocker_cmd_group_tbl_del,
				      found, NULL, NULL);
		rocker_group_tbl_entry_free(trans, found);
	}

	return err;
}

static int rocker_group_tbl_do(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       struct rocker_group_tbl_entry *entry)
{
	if (flags & ROCKER_OP_FLAG_REMOVE)
		return rocker_group_tbl_del(rocker_port, trans, flags, entry);
	else
		return rocker_group_tbl_add(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_interface(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     __be16 vlan_id, u32 out_pport,
				     int pop_vlan)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans,
				   int flags, u8 group_count,
				   const u32 *group_ids, u32 group_id)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = group_id;
	entry->group_count = group_count;

	entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
					       group_count, sizeof(u32));
	if (!entry->group_ids) {
		rocker_port_kfree(trans, entry);
		return -ENOMEM;
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static int rocker_group_l2_flood(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 __be16 vlan_id, u8 group_count,
				 const u32 *group_ids, u32 group_id)
{
	return rocker_group_l2_fan_out(rocker_port, trans, flags,
				       group_count, group_ids,
				       group_id);
}

static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags,
				   u32 index, const u8 *src_mac, const u8 *dst_mac,
				   __be16 vlan_id, bool ttl_check, u32 pport)
{
	struct rocker_group_tbl_entry *entry;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
	if (dst_mac)
		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
	entry->l3_unicast.vlan_id = vlan_id;
	entry->l3_unicast.ttl_check = ttl_check;
	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);

	return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}

static struct rocker_neigh_tbl_entry *
rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
{
	struct rocker_neigh_tbl_entry *found;

	hash_for_each_possible(rocker->neigh_tbl, found,
			       entry, be32_to_cpu(ip_addr))
		if (found->ip_addr == ip_addr)
			return found;

	return NULL;
}

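/* Neigh table entries are reference counted. The hardware index is
 * assigned up front, during the prepare phase (or directly for
 * non-transactional calls), so dependent groups can be built
 * against it; the hash table and reference count are only touched
 * once past the prepare phase.
 */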
static void _rocker_neigh_add(struct rocker *rocker,
			      struct switchdev_trans *trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (!switchdev_trans_ph_commit(trans))
		entry->index = rocker->neigh_tbl_next_index++;
	if (switchdev_trans_ph_prepare(trans))
		return;
	entry->ref_count++;
	hash_add(rocker->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}

static void _rocker_neigh_del(struct switchdev_trans *trans,
			      struct rocker_neigh_tbl_entry *entry)
{
	if (switchdev_trans_ph_prepare(trans))
		return;
	if (--entry->ref_count == 0) {
		hash_del(&entry->entry);
		rocker_port_kfree(trans, entry);
	}
}

static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
				 struct switchdev_trans *trans,
				 const u8 *eth_dst, bool ttl_check)
{
	if (eth_dst) {
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = ttl_check;
	} else if (!switchdev_trans_ph_prepare(trans)) {
		entry->ref_count++;
	}
}

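/* Install or remove the L3 unicast group and /32 route for an IPv4
 * neighbor. Whether this is an add, an update of an existing entry,
 * or a remove is derived from the REMOVE op flag combined with
 * whether the neighbor is already in the table.
 */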
static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans,
				  int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		_rocker_neigh_add(rocker, trans, entry);
	} else if (removing) {
		memcpy(entry, found, sizeof(*entry));
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = rocker_group_l3_unicast(rocker_port, trans, flags,
				      entry->index,
				      rocker_port->dev->dev_addr,
				      entry->eth_dst,
				      rocker_port->internal_vlan_id,
				      entry->ttl_check,
				      rocker_port->pport);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
						     eth_type, ip_addr,
						     inet_make_mask(32),
						     priority, goto_tbl,
						     group_id, flags);

		if (err)
			netdev_err(rocker_port->dev,
				   "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	if (!adding)
		rocker_port_kfree(trans, entry);

	return err;
}

static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans,
				    __be32 ip_addr)
{
	struct net_device *dev = rocker_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	if (n->nud_state & NUD_VALID)
		err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
					     ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}

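/* Resolve a nexthop IP address to the index of its L3 unicast
 * group, creating a refcounted neigh table entry on first use.
 * *index is set even when the neighbor is not yet resolved; in that
 * case ARP resolution is kicked off asynchronously.
 */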
static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
			       struct switchdev_trans *trans, int flags,
			       __be32 ip_addr, u32 *index)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_neigh_tbl_entry *entry;
	struct rocker_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

	found = rocker_neigh_tbl_find(rocker, ip_addr);
	if (found)
		*index = found->index;

	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = rocker_port->dev;
		_rocker_neigh_add(rocker, trans, entry);
		*index = entry->index;
		resolved = false;
	} else if (removing) {
		_rocker_neigh_del(trans, found);
	} else if (updating) {
		_rocker_neigh_update(found, trans, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

	if (!adding)
		rocker_port_kfree(trans, entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);

	return err;
}

static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					int flags, __be16 vlan_id)
{
	struct rocker_port *p;
	const struct rocker *rocker = rocker_port->rocker;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 *group_ids;
	u8 group_count = 0;
	int err = 0;
	int i;

	group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
					rocker->port_count, sizeof(u32));
	if (!group_ids)
		return -ENOMEM;

	/* Adjust the flood group for this VLAN.  The flood group
	 * references an L2 interface group for each port in this
	 * VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (!p)
			continue;
		if (!rocker_port_is_bridged(p))
			continue;
		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
			group_ids[group_count++] =
				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
		}
	}

	/* If there are no bridged ports in this VLAN, we're done */
	if (group_count == 0)
		goto no_ports_in_vlan;

	err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
				    group_count, group_ids, group_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);

no_ports_in_vlan:
	rocker_port_kfree(trans, group_ids);
	return err;
}

static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
				      struct switchdev_trans *trans, int flags,
				      __be16 vlan_id, bool pop_vlan)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_port *p;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = rocker_port->pport;
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;
	err = rocker_group_l2_interface(rocker_port, trans, flags,
					vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}

static struct rocker_ctrl {
	const u8 *eth_dst;
	const u8 *eth_dst_mask;
	__be16 eth_type;
	bool acl;
	bool bridge;
	bool term;
	bool copy_to_cpu;
} rocker_ctrls[] = {
	[ROCKER_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[ROCKER_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[ROCKER_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};

static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport = rocker_port->pport;
	u32 in_pport_mask = 0xffffffff;
	u32 out_pport = 0;
	const u8 *eth_src = NULL;
	const u8 *eth_src_mask = NULL;
	__be16 vlan_id_mask = htons(0xffff);
	u8 ip_proto = 0;
	u8 ip_proto_mask = 0;
	u8 ip_tos = 0;
	u8 ip_tos_mask = 0;
	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	int err;

	err = rocker_flow_tbl_acl(rocker_port, trans, flags,
				  in_pport, in_pport_mask,
				  eth_src, eth_src_mask,
				  ctrl->eth_dst, ctrl->eth_dst_mask,
				  ctrl->eth_type,
				  vlan_id, vlan_id_mask,
				  ip_proto, ip_proto_mask,
				  ip_tos, ip_tos_mask,
				  group_id);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					int flags,
					const struct rocker_ctrl *ctrl,
					__be16 vlan_id)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
	u32 tunnel_id = 0;
	int err;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
				     ctrl->eth_dst, ctrl->eth_dst_mask,
				     vlan_id, tunnel_id,
				     goto_tbl, group_id, ctrl->copy_to_cpu);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);

	return err;
}

static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
				      struct switchdev_trans *trans, int flags,
				      const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 vlan_id_mask = htons(0xffff);
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       ctrl->eth_type, ctrl->eth_dst,
				       ctrl->eth_dst_mask, vlan_id,
				       vlan_id_mask, ctrl->copy_to_cpu,
				       flags);

	if (err)
		netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);

	return err;
}

static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
	if (ctrl->acl)
		return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
						 ctrl, vlan_id);
	if (ctrl->bridge)
		return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
						    ctrl, vlan_id);

	if (ctrl->term)
		return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
						  ctrl, vlan_id);

	return -EOPNOTSUPP;
}

static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
				     struct switchdev_trans *trans, int flags,
				     __be16 vlan_id)
{
	int err = 0;
	int i;

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (rocker_port->ctrls[i]) {
			err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
						    &rocker_ctrls[i], vlan_id);
			if (err)
				return err;
		}
	}

	return err;
}

static int rocker_port_ctrl(struct rocker_port *rocker_port,
			    struct switchdev_trans *trans, int flags,
			    const struct rocker_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
					    ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}

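/* Add or remove a VLAN on a port: toggles the port's vlan_bitmap
 * and installs (or removes) the per-VLAN ctrl ACLs, L2 interface
 * groups, flood group and VLAN table entry. During the switchdev
 * prepare phase the bitmap change is undone on exit so the commit
 * phase starts from the original state.
 */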
static int rocker_port_vlan(struct rocker_port *rocker_port,
			    struct switchdev_trans *trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = rocker_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);

	if (adding && test_bit(ntohs(internal_vlan_id),
			       rocker_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding && !test_bit(ntohs(internal_vlan_id),
				      rocker_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	if (adding) {
		err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
						internal_vlan_id);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
					 internal_vlan_id, untagged);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
					   internal_vlan_id);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
				   in_pport, vlan_id, vlan_id_mask,
				   goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN table\n", err);

err_out:
	if (switchdev_trans_ph_prepare(trans))
		change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	return err;
}

static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags)
{
	enum rocker_of_dpa_table_id goto_tbl;
	u32 in_pport;
	u32 in_pport_mask;
	int err;

	/* Normal Ethernet Frames.  Matches pkts from any local physical
	 * ports.  Goto VLAN tbl.
	 */

	in_pport = 0;
	in_pport_mask = 0xffff0000;
	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;

	err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
				      in_pport, in_pport_mask,
				      goto_tbl);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) ingress port table entry\n", err);

	return err;
}

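/* FDB learn/forget events are propagated to the bridge via
 * switchdev notifiers. The notifier call is deferred to a work item
 * so that it runs in process context; the learn itself may have
 * been observed from atomic context.
 */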
struct rocker_fdb_learn_work {
	struct work_struct work;
	struct rocker_port *rocker_port;
	struct switchdev_trans *trans;
	int flags;
	u8 addr[ETH_ALEN];
	u16 vid;
};

static void rocker_port_fdb_learn_work(struct work_struct *work)
{
	const struct rocker_fdb_learn_work *lw =
		container_of(work, struct rocker_fdb_learn_work, work);
	bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
	bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
					 lw->rocker_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
					 lw->rocker_port->dev, &info.info);

	rocker_port_kfree(lw->trans, work);
}

static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 const u8 *addr, __be16 vlan_id)
{
	struct rocker_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = rocker_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	if (rocker_port_is_bridged(rocker_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
		err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
					     NULL, vlan_id, tunnel_id, goto_tbl,
					     group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, rocker_port_fdb_learn_work);

	lw->rocker_port = rocker_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);

	if (switchdev_trans_ph_prepare(trans))
		rocker_port_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}

static struct rocker_fdb_tbl_entry *
rocker_fdb_tbl_find(const struct rocker *rocker,
		    const struct rocker_fdb_tbl_entry *match)
{
	struct rocker_fdb_tbl_entry *found;

	hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
			return found;

	return NULL;
}

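/* Software FDB state is kept in a crc32-keyed hash table. Adding an
 * entry that already exists degrades to a refresh of the hardware
 * entry's aging timer, and removing a missing entry is a no-op.
 */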
static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *fdb;
	struct rocker_fdb_tbl_entry *found;
	bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.rocker_port = rocker_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	found = rocker_fdb_tbl_find(rocker, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			rocker_port_kfree(trans, fdb);
			if (!switchdev_trans_ph_prepare(trans))
				hash_del(&found->entry);
		}
	} else if (!removing) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_add(rocker->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		rocker_port_kfree(trans, fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= ROCKER_OP_FLAG_REFRESH;
	}

	return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
}

static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= ROCKER_OP_FLAG_REMOVE;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		if (!found->learned)
			continue;
		err = rocker_port_fdb_learn(rocker_port, trans, flags,
					    found->key.addr,
					    found->key.vlan_id);
		if (err)
			goto err_out;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}

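/* Timer callback that ages out learned FDB entries: entries whose
 * ageing time has expired are removed, and the timer is rearmed for
 * the next soonest expiry, at most BR_MIN_AGEING_TIME from now.
 */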
static void rocker_fdb_cleanup(unsigned long data)
{
	struct rocker *rocker = (struct rocker *)data;
	struct rocker_port *rocker_port;
	struct rocker_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
		    ROCKER_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;
		rocker_port = entry->key.rocker_port;
		expires = entry->touched + rocker_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			rocker_port_fdb_learn(rocker_port, NULL,
					      flags, entry->key.addr,
					      entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
}

static int rocker_port_router_mac(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  __be16 vlan_id)
{
	u32 in_pport_mask = 0xffffffff;
	__be16 eth_type;
	const u8 *dst_mac_mask = ff_mac;
	__be16 vlan_id_mask = htons(0xffff);
	bool copy_to_cpu = false;
	int err;

	if (ntohs(vlan_id) == 0)
		vlan_id = rocker_port->internal_vlan_id;

	eth_type = htons(ETH_P_IP);
	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);
	if (err)
		return err;

	eth_type = htons(ETH_P_IPV6);
	err = rocker_flow_tbl_term_mac(rocker_port, trans,
				       rocker_port->pport, in_pport_mask,
				       eth_type, rocker_port->dev->dev_addr,
				       dst_mac_mask, vlan_id, vlan_id_mask,
				       copy_to_cpu, flags);

	return err;
}

static int rocker_port_fwding(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		flags |= ROCKER_OP_FLAG_REMOVE;

	out_pport = rocker_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		pop_vlan = rocker_vlan_id_is_internal(vlan_id);
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}

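/* Reprogram the set of ctrl traps and the FDB/forwarding state to
 * match a new STP state. The previous ctrl set and STP state are
 * snapshotted on entry and restored on exit during the switchdev
 * prepare phase, so the commit phase starts from the original
 * state.
 */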
static int rocker_port_stp_update(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  u8 state)
{
	bool want[ROCKER_CTRL_MAX] = { 0, };
	bool prev_ctrls[ROCKER_CTRL_MAX];
	u8 uninitialized_var(prev_state);
	int err;
	int i;

	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
		prev_state = rocker_port->stp_state;
	}

	if (rocker_port->stp_state == state)
		return 0;

	rocker_port->stp_state = state;

	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		want[ROCKER_CTRL_IPV4_MCAST] = true;
		want[ROCKER_CTRL_IPV6_MCAST] = true;
		if (rocker_port_is_bridged(rocker_port))
			want[ROCKER_CTRL_DFLT_BRIDGING] = true;
		else if (rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_DFLT_OVS] = true;
		else
			want[ROCKER_CTRL_LOCAL_ARP] = true;
		break;
	}

	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (want[i] != rocker_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
			err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
					       &rocker_ctrls[i]);
			if (err)
				goto err_out;
			rocker_port->ctrls[i] = want[i];
		}
	}

	err = rocker_port_fdb_flush(rocker_port, trans, flags);
	if (err)
		goto err_out;

	err = rocker_port_fwding(rocker_port, trans, flags);

err_out:
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		rocker_port->stp_state = prev_state;
	}

	return err;
}

static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will enable port */
		return 0;

	/* port is not bridged, so simulate going to FORWARDING state */
	return rocker_port_stp_update(rocker_port, trans, flags,
				      BR_STATE_FORWARDING);
}

static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
				   struct switchdev_trans *trans, int flags)
{
	if (rocker_port_is_bridged(rocker_port))
		/* bridge STP will disable port */
		return 0;

	/* port is not bridged, so simulate going to DISABLED state */
	return rocker_port_stp_update(rocker_port, trans, flags,
				      BR_STATE_DISABLED);
}

static struct rocker_internal_vlan_tbl_entry *
rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
{
	struct rocker_internal_vlan_tbl_entry *found;

	hash_for_each_possible(rocker->internal_vlan_tbl, found,
			       entry, ifindex) {
		if (found->ifindex == ifindex)
			return found;
	}

	return NULL;
}

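/* Internal VLAN IDs are allocated from a fixed pool starting at
 * ROCKER_INTERNAL_VLAN_ID_BASE and are refcounted per ifindex, so
 * repeated gets for the same interface share one ID. Returns 0 (an
 * invalid VLAN ID) if allocation fails.
 */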
static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
					       int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *entry;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (found) {
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);

	for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}

static void
rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
				 int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (!found) {
		netdev_err(rocker_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, rocker->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}

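/* Install or remove an IPv4 route. A route whose gateway is
 * reachable through this port resolves to an L3 unicast (nexthop)
 * group; anything else is trapped to the CPU via the L2 interface
 * group for the internal VLAN.
 */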
static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, __be32 dst,
				int dst_len, const struct fib_info *fi,
				u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == rocker_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = rocker_port_ipv4_nh(rocker_port, trans, flags,
					  nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
					     dst_mask, priority, goto_tbl,
					     group_id, flags);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}

/*****************
 * Net device ops
 *****************/

static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_port_fwd_enable(rocker_port, NULL, 0);
	if (err)
		goto err_fwd_enable;

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	if (!dev->proto_down)
		rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_fwd_enable:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}

4087static int rocker_port_stop(struct net_device *dev)
4088{
4089 struct rocker_port *rocker_port = netdev_priv(dev);
4090
4091 netif_stop_queue(dev);
4092 rocker_port_set_enable(rocker_port, false);
4093 napi_disable(&rocker_port->napi_rx);
4094 napi_disable(&rocker_port->napi_tx);
Jiri Pirko76c6f942015-09-24 10:02:44 +02004095 rocker_port_fwd_disable(rocker_port, NULL,
Scott Feldmanf66feaa2015-06-12 21:35:50 -07004096 ROCKER_OP_FLAG_NOWAIT);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004097 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4098 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4099 rocker_port_dma_rings_fini(rocker_port);
4100
4101 return 0;
4102}
4103
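/* A TX descriptor carries the packet as a nest of TLVs, roughly:
 *
 *   ROCKER_TLV_TX_FRAGS
 *     ROCKER_TLV_TX_FRAG
 *       ROCKER_TLV_TX_FRAG_ATTR_ADDR (u64 DMA handle)
 *       ROCKER_TLV_TX_FRAG_ATTR_LEN  (u16)
 *     ROCKER_TLV_TX_FRAG
 *       ...
 *
 * The next two helpers walk and build that nesting, respectively: one
 * unmaps every fragment found in a completed descriptor, the other
 * DMA-maps one buffer and appends the matching TX_FRAG attribute.
 */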
Simon Hormane5054642015-05-25 14:28:36 +09004104static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4105 const struct rocker_desc_info *desc_info)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004106{
Simon Hormane5054642015-05-25 14:28:36 +09004107 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004108 struct pci_dev *pdev = rocker->pdev;
Simon Hormane5054642015-05-25 14:28:36 +09004109 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004110 struct rocker_tlv *attr;
4111 int rem;
4112
4113 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4114 if (!attrs[ROCKER_TLV_TX_FRAGS])
4115 return;
4116 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
Simon Hormane5054642015-05-25 14:28:36 +09004117 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004118 dma_addr_t dma_handle;
4119 size_t len;
4120
4121 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4122 continue;
4123 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
4124 attr);
4125 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4126 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4127 continue;
4128 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4129 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4130 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
4131 }
4132}
4133
Simon Hormane5054642015-05-25 14:28:36 +09004134static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004135 struct rocker_desc_info *desc_info,
4136 char *buf, size_t buf_len)
4137{
Simon Hormane5054642015-05-25 14:28:36 +09004138 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004139 struct pci_dev *pdev = rocker->pdev;
4140 dma_addr_t dma_handle;
4141 struct rocker_tlv *frag;
4142
4143 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
4144 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
4145 if (net_ratelimit())
4146 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
4147 return -EIO;
4148 }
4149 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
4150 if (!frag)
4151 goto unmap_frag;
4152 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
4153 dma_handle))
4154 goto nest_cancel;
4155 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
4156 buf_len))
4157 goto nest_cancel;
4158 rocker_tlv_nest_end(desc_info, frag);
4159 return 0;
4160
4161nest_cancel:
4162 rocker_tlv_nest_cancel(desc_info, frag);
4163unmap_frag:
4164 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
4165 return -EMSGSIZE;
4166}
4167
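/* The transmit hot path: build one TX descriptor per skb.  If the ring
 * is full the skb is left unconsumed and NETDEV_TX_BUSY is returned,
 * which should be rare since the queue is stopped as soon as the last
 * free descriptor is taken (see the tail of the function).  The queue
 * is woken again from rocker_port_poll_tx() once completions free up
 * descriptors.
 */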
4168static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
4169{
4170 struct rocker_port *rocker_port = netdev_priv(dev);
4171 struct rocker *rocker = rocker_port->rocker;
4172 struct rocker_desc_info *desc_info;
4173 struct rocker_tlv *frags;
4174 int i;
4175 int err;
4176
4177 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4178 if (unlikely(!desc_info)) {
4179 if (net_ratelimit())
4180 netdev_err(dev, "tx ring full when queue awake\n");
4181 return NETDEV_TX_BUSY;
4182 }
4183
4184 rocker_desc_cookie_ptr_set(desc_info, skb);
4185
	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
	if (!frags)
		goto out;
	/* Linearize before mapping the head: skb_linearize() may
	 * reallocate the head buffer, which would leave an already
	 * mapped head pointing at stale data.
	 */
	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
		err = skb_linearize(skb);
		if (err)
			goto nest_cancel;
	}
	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
					  skb->data, skb_headlen(skb));
	if (err)
		goto nest_cancel;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004198
4199 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4200 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4201
4202 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4203 skb_frag_address(frag),
4204 skb_frag_size(frag));
4205 if (err)
4206 goto unmap_frags;
4207 }
4208 rocker_tlv_nest_end(desc_info, frags);
4209
4210 rocker_desc_gen_clear(desc_info);
4211 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
4212
4213 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4214 if (!desc_info)
4215 netif_stop_queue(dev);
4216
4217 return NETDEV_TX_OK;
4218
4219unmap_frags:
4220 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4221nest_cancel:
4222 rocker_tlv_nest_cancel(desc_info, frags);
4223out:
4224 dev_kfree_skb(skb);
David Ahernf2bbca52015-01-16 14:22:29 -07004225 dev->stats.tx_dropped++;
4226
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004227 return NETDEV_TX_OK;
4228}
4229
4230static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4231{
4232 struct sockaddr *addr = p;
4233 struct rocker_port *rocker_port = netdev_priv(dev);
4234 int err;
4235
4236 if (!is_valid_ether_addr(addr->sa_data))
4237 return -EADDRNOTAVAIL;
4238
4239 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4240 if (err)
4241 return err;
4242 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4243 return 0;
4244}
4245
Scott Feldman77a58c72015-07-08 16:06:47 -07004246static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4247{
4248 struct rocker_port *rocker_port = netdev_priv(dev);
4249 int running = netif_running(dev);
4250 int err;
4251
4252#define ROCKER_PORT_MIN_MTU 68
4253#define ROCKER_PORT_MAX_MTU 9000
4254
4255 if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4256 return -EINVAL;
4257
4258 if (running)
4259 rocker_port_stop(dev);
4260
4261 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4262 dev->mtu = new_mtu;
4263
4264 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4265 if (err)
4266 return err;
4267
4268 if (running)
4269 err = rocker_port_open(dev);
4270
4271 return err;
4272}
4273
David Aherndb191702015-03-17 20:23:16 -06004274static int rocker_port_get_phys_port_name(struct net_device *dev,
4275 char *buf, size_t len)
4276{
4277 struct rocker_port *rocker_port = netdev_priv(dev);
4278 struct port_name name = { .buf = buf, .len = len };
4279 int err;
4280
Jiri Pirko76c6f942015-09-24 10:02:44 +02004281 err = rocker_cmd_exec(rocker_port, NULL, 0,
David Aherndb191702015-03-17 20:23:16 -06004282 rocker_cmd_get_port_settings_prep, NULL,
4283 rocker_cmd_get_port_settings_phys_name_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07004284 &name);
David Aherndb191702015-03-17 20:23:16 -06004285
4286 return err ? -EOPNOTSUPP : 0;
4287}
4288
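/* ndo_change_proto_down lets a protocol daemon take the link down in
 * hardware while the netdev stays administratively up; with iproute2
 * this is driven by something like "ip link set sw1p1 protodown on"
 * (port name illustrative).  Only the port-enable state is touched,
 * so rings, IRQs and pipeline state survive the toggle.
 */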
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004289static int rocker_port_change_proto_down(struct net_device *dev,
4290 bool proto_down)
4291{
4292 struct rocker_port *rocker_port = netdev_priv(dev);
4293
4294 if (rocker_port->dev->flags & IFF_UP)
4295 rocker_port_set_enable(rocker_port, !proto_down);
4296 rocker_port->dev->proto_down = proto_down;
4297 return 0;
4298}
4299
Scott Feldmandd19f832015-08-12 18:45:25 -07004300static void rocker_port_neigh_destroy(struct neighbour *n)
4301{
4302 struct rocker_port *rocker_port = netdev_priv(n->dev);
4303 int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4304 __be32 ip_addr = *(__be32 *)n->primary_key;
4305
Jiri Pirko76c6f942015-09-24 10:02:44 +02004306 rocker_port_ipv4_neigh(rocker_port, NULL,
Scott Feldmandd19f832015-08-12 18:45:25 -07004307 flags, ip_addr, n->ha);
4308}
4309
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004310static const struct net_device_ops rocker_port_netdev_ops = {
4311 .ndo_open = rocker_port_open,
4312 .ndo_stop = rocker_port_stop,
4313 .ndo_start_xmit = rocker_port_xmit,
4314 .ndo_set_mac_address = rocker_port_set_mac_address,
Scott Feldman77a58c72015-07-08 16:06:47 -07004315 .ndo_change_mtu = rocker_port_change_mtu,
Scott Feldman85fdb952015-05-10 09:48:05 -07004316 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
Scott Feldmanfc8f40d2015-05-10 09:47:57 -07004317 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
Scott Feldman54ba5a02015-05-10 09:48:01 -07004318 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004319 .ndo_fdb_add = switchdev_port_fdb_add,
4320 .ndo_fdb_del = switchdev_port_fdb_del,
4321 .ndo_fdb_dump = switchdev_port_fdb_dump,
David Aherndb191702015-03-17 20:23:16 -06004322 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
Anuradha Karuppiahc3055242015-07-14 13:43:21 -07004323 .ndo_change_proto_down = rocker_port_change_proto_down,
Scott Feldmandd19f832015-08-12 18:45:25 -07004324 .ndo_neigh_destroy = rocker_port_neigh_destroy,
Scott Feldman98237d42015-03-15 21:07:15 -07004325};
4326
4327/********************
4328 * switchdev interface
4329 ********************/
4330
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004331static int rocker_port_attr_get(struct net_device *dev,
4332 struct switchdev_attr *attr)
Scott Feldman98237d42015-03-15 21:07:15 -07004333{
Simon Hormane5054642015-05-25 14:28:36 +09004334 const struct rocker_port *rocker_port = netdev_priv(dev);
4335 const struct rocker *rocker = rocker_port->rocker;
Scott Feldman98237d42015-03-15 21:07:15 -07004336
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004337 switch (attr->id) {
4338 case SWITCHDEV_ATTR_PORT_PARENT_ID:
Scott Feldman42275bd2015-05-13 11:16:50 -07004339 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4340 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004341 break;
Scott Feldman6004c862015-05-10 09:47:55 -07004342 case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
Scott Feldman42275bd2015-05-13 11:16:50 -07004343 attr->u.brport_flags = rocker_port->brport_flags;
Scott Feldman6004c862015-05-10 09:47:55 -07004344 break;
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004345 default:
4346 return -EOPNOTSUPP;
4347 }
4348
Scott Feldman98237d42015-03-15 21:07:15 -07004349 return 0;
4350}
4351
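/* switchdev attr/obj writes arrive as a two-phase transaction: a
 * PREPARE pass in which the driver must acquire everything it will
 * need (allocations made then are queued on rocker_port->trans_mem),
 * followed by either a COMMIT that consumes them or an ABORT that must
 * leave no trace.  The helper below implements the ABORT side by
 * freeing whatever PREPARE left queued.
 */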
Simon Hormane5054642015-05-25 14:28:36 +09004352static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
Scott Feldmanc4f20322015-05-10 09:47:50 -07004353{
4354 struct list_head *mem, *tmp;
4355
4356 list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
4357 list_del(mem);
4358 kfree(mem);
4359 }
4360}
4361
Scott Feldman6004c862015-05-10 09:47:55 -07004362static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004363 struct switchdev_trans *trans,
Scott Feldman6004c862015-05-10 09:47:55 -07004364 unsigned long brport_flags)
4365{
4366 unsigned long orig_flags;
4367 int err = 0;
4368
4369 orig_flags = rocker_port->brport_flags;
4370 rocker_port->brport_flags = brport_flags;
4371 if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
Jiri Pirko76c6f942015-09-24 10:02:44 +02004372 err = rocker_port_set_learning(rocker_port, trans);
Scott Feldman6004c862015-05-10 09:47:55 -07004373
Jiri Pirko76c6f942015-09-24 10:02:44 +02004374 if (switchdev_trans_ph_prepare(trans))
Scott Feldman6004c862015-05-10 09:47:55 -07004375 rocker_port->brport_flags = orig_flags;
4376
4377 return err;
4378}
4379
Scott Feldmanc4f20322015-05-10 09:47:50 -07004380static int rocker_port_attr_set(struct net_device *dev,
Jiri Pirko7ea6eb32015-09-24 10:02:41 +02004381 struct switchdev_attr *attr,
4382 struct switchdev_trans *trans)
Scott Feldmanc4f20322015-05-10 09:47:50 -07004383{
4384 struct rocker_port *rocker_port = netdev_priv(dev);
4385 int err = 0;
4386
Jiri Pirkof8db8342015-09-24 10:02:42 +02004387 switch (trans->ph) {
Scott Feldmanc4f20322015-05-10 09:47:50 -07004388 case SWITCHDEV_TRANS_PREPARE:
4389 BUG_ON(!list_empty(&rocker_port->trans_mem));
4390 break;
4391 case SWITCHDEV_TRANS_ABORT:
4392 rocker_port_trans_abort(rocker_port);
4393 return 0;
4394 default:
4395 break;
4396 }
4397
4398 switch (attr->id) {
Scott Feldman35636062015-05-10 09:47:51 -07004399 case SWITCHDEV_ATTR_PORT_STP_STATE:
Jiri Pirko76c6f942015-09-24 10:02:44 +02004400 err = rocker_port_stp_update(rocker_port, trans,
Scott Feldmanac283932015-06-12 21:35:48 -07004401 ROCKER_OP_FLAG_NOWAIT,
Scott Feldman42275bd2015-05-13 11:16:50 -07004402 attr->u.stp_state);
Scott Feldman35636062015-05-10 09:47:51 -07004403 break;
Scott Feldman6004c862015-05-10 09:47:55 -07004404 case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
Jiri Pirko76c6f942015-09-24 10:02:44 +02004405 err = rocker_port_brport_flags_set(rocker_port, trans,
Scott Feldman42275bd2015-05-13 11:16:50 -07004406 attr->u.brport_flags);
Scott Feldman6004c862015-05-10 09:47:55 -07004407 break;
Scott Feldmanc4f20322015-05-10 09:47:50 -07004408 default:
4409 err = -EOPNOTSUPP;
4410 break;
4411 }
4412
4413 return err;
Scott Feldman98237d42015-03-15 21:07:15 -07004414}
4415
Scott Feldman9228ad22015-05-10 09:47:54 -07004416static int rocker_port_vlan_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004417 struct switchdev_trans *trans,
4418 u16 vid, u16 flags)
Scott Feldman9228ad22015-05-10 09:47:54 -07004419{
4420 int err;
4421
 4422	/* XXX deal with flags - the PVID and UNTAGGED bits are accepted but not yet honoured */
4423
Jiri Pirko76c6f942015-09-24 10:02:44 +02004424 err = rocker_port_vlan(rocker_port, trans, 0, vid);
Scott Feldman9228ad22015-05-10 09:47:54 -07004425 if (err)
4426 return err;
4427
Jiri Pirko76c6f942015-09-24 10:02:44 +02004428 err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
Scott Feldmancec04a62015-06-01 11:39:03 -07004429 if (err)
Jiri Pirko76c6f942015-09-24 10:02:44 +02004430 rocker_port_vlan(rocker_port, trans,
Scott Feldmancec04a62015-06-01 11:39:03 -07004431 ROCKER_OP_FLAG_REMOVE, vid);
4432
4433 return err;
Scott Feldman9228ad22015-05-10 09:47:54 -07004434}
4435
4436static int rocker_port_vlans_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004437 struct switchdev_trans *trans,
Simon Hormane5054642015-05-25 14:28:36 +09004438 const struct switchdev_obj_vlan *vlan)
Scott Feldman9228ad22015-05-10 09:47:54 -07004439{
4440 u16 vid;
4441 int err;
4442
Scott Feldman3e3a78b2015-06-22 00:27:16 -07004443 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
Jiri Pirko76c6f942015-09-24 10:02:44 +02004444 err = rocker_port_vlan_add(rocker_port, trans,
Scott Feldman9228ad22015-05-10 09:47:54 -07004445 vid, vlan->flags);
4446 if (err)
4447 return err;
4448 }
4449
4450 return 0;
4451}
4452
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004453static int rocker_port_fdb_add(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004454 struct switchdev_trans *trans,
Simon Hormane5054642015-05-25 14:28:36 +09004455 const struct switchdev_obj_fdb *fdb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004456{
4457 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4458 int flags = 0;
4459
4460 if (!rocker_port_is_bridged(rocker_port))
4461 return -EINVAL;
4462
Jiri Pirko76c6f942015-09-24 10:02:44 +02004463 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004464}
4465
Scott Feldman9228ad22015-05-10 09:47:54 -07004466static int rocker_port_obj_add(struct net_device *dev,
Jiri Pirko7ea6eb32015-09-24 10:02:41 +02004467 struct switchdev_obj *obj,
4468 struct switchdev_trans *trans)
Scott Feldman9228ad22015-05-10 09:47:54 -07004469{
4470 struct rocker_port *rocker_port = netdev_priv(dev);
Simon Hormane5054642015-05-25 14:28:36 +09004471 const struct switchdev_obj_ipv4_fib *fib4;
Scott Feldman9228ad22015-05-10 09:47:54 -07004472 int err = 0;
4473
Jiri Pirkof8db8342015-09-24 10:02:42 +02004474 switch (trans->ph) {
Scott Feldman9228ad22015-05-10 09:47:54 -07004475 case SWITCHDEV_TRANS_PREPARE:
4476 BUG_ON(!list_empty(&rocker_port->trans_mem));
4477 break;
4478 case SWITCHDEV_TRANS_ABORT:
4479 rocker_port_trans_abort(rocker_port);
4480 return 0;
4481 default:
4482 break;
4483 }
4484
4485 switch (obj->id) {
4486 case SWITCHDEV_OBJ_PORT_VLAN:
Jiri Pirko76c6f942015-09-24 10:02:44 +02004487 err = rocker_port_vlans_add(rocker_port, trans,
Scott Feldman42275bd2015-05-13 11:16:50 -07004488 &obj->u.vlan);
Scott Feldman9228ad22015-05-10 09:47:54 -07004489 break;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004490 case SWITCHDEV_OBJ_IPV4_FIB:
Scott Feldman42275bd2015-05-13 11:16:50 -07004491 fib4 = &obj->u.ipv4_fib;
Jiri Pirko76c6f942015-09-24 10:02:44 +02004492 err = rocker_port_fib_ipv4(rocker_port, trans,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004493 htonl(fib4->dst), fib4->dst_len,
Scott Feldman58c2cb12015-05-10 09:48:06 -07004494 fib4->fi, fib4->tb_id, 0);
4495 break;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004496 case SWITCHDEV_OBJ_PORT_FDB:
Jiri Pirko76c6f942015-09-24 10:02:44 +02004497 err = rocker_port_fdb_add(rocker_port, trans, &obj->u.fdb);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004498 break;
Scott Feldman9228ad22015-05-10 09:47:54 -07004499 default:
4500 err = -EOPNOTSUPP;
4501 break;
4502 }
4503
4504 return err;
4505}
4506
4507static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4508 u16 vid, u16 flags)
4509{
4510 int err;
4511
Jiri Pirko76c6f942015-09-24 10:02:44 +02004512 err = rocker_port_router_mac(rocker_port, NULL,
Scott Feldman9228ad22015-05-10 09:47:54 -07004513 ROCKER_OP_FLAG_REMOVE, htons(vid));
4514 if (err)
4515 return err;
4516
Jiri Pirko76c6f942015-09-24 10:02:44 +02004517 return rocker_port_vlan(rocker_port, NULL,
Scott Feldman9228ad22015-05-10 09:47:54 -07004518 ROCKER_OP_FLAG_REMOVE, vid);
4519}
4520
4521static int rocker_port_vlans_del(struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09004522 const struct switchdev_obj_vlan *vlan)
Scott Feldman9228ad22015-05-10 09:47:54 -07004523{
4524 u16 vid;
4525 int err;
4526
Scott Feldman3e3a78b2015-06-22 00:27:16 -07004527 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
Scott Feldman9228ad22015-05-10 09:47:54 -07004528 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4529 if (err)
4530 return err;
4531 }
4532
4533 return 0;
4534}
4535
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004536static int rocker_port_fdb_del(struct rocker_port *rocker_port,
Jiri Pirko76c6f942015-09-24 10:02:44 +02004537 struct switchdev_trans *trans,
Simon Hormane5054642015-05-25 14:28:36 +09004538 const struct switchdev_obj_fdb *fdb)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004539{
4540 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
Scott Feldmanb4ad7ba2015-06-14 11:33:11 -07004541 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004542
4543 if (!rocker_port_is_bridged(rocker_port))
4544 return -EINVAL;
4545
Jiri Pirko76c6f942015-09-24 10:02:44 +02004546 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004547}
4548
Scott Feldman9228ad22015-05-10 09:47:54 -07004549static int rocker_port_obj_del(struct net_device *dev,
4550 struct switchdev_obj *obj)
4551{
4552 struct rocker_port *rocker_port = netdev_priv(dev);
Simon Hormane5054642015-05-25 14:28:36 +09004553 const struct switchdev_obj_ipv4_fib *fib4;
Scott Feldman9228ad22015-05-10 09:47:54 -07004554 int err = 0;
4555
4556 switch (obj->id) {
4557 case SWITCHDEV_OBJ_PORT_VLAN:
Scott Feldman42275bd2015-05-13 11:16:50 -07004558 err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
Scott Feldman9228ad22015-05-10 09:47:54 -07004559 break;
Scott Feldman58c2cb12015-05-10 09:48:06 -07004560 case SWITCHDEV_OBJ_IPV4_FIB:
Scott Feldman42275bd2015-05-13 11:16:50 -07004561 fib4 = &obj->u.ipv4_fib;
Jiri Pirko76c6f942015-09-24 10:02:44 +02004562 err = rocker_port_fib_ipv4(rocker_port, NULL,
Scott Feldman7a7ee532015-05-12 23:03:52 -07004563 htonl(fib4->dst), fib4->dst_len,
4564 fib4->fi, fib4->tb_id,
4565 ROCKER_OP_FLAG_REMOVE);
Scott Feldman58c2cb12015-05-10 09:48:06 -07004566 break;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004567 case SWITCHDEV_OBJ_PORT_FDB:
Jiri Pirko76c6f942015-09-24 10:02:44 +02004568 err = rocker_port_fdb_del(rocker_port, NULL, &obj->u.fdb);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004569 break;
4570 default:
4571 err = -EOPNOTSUPP;
4572 break;
4573 }
4574
4575 return err;
4576}
4577
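/* Object dumps are callback driven: for each matching entry the driver
 * fills obj->u.* in place and invokes obj->cb(), stopping on the first
 * nonzero return.  The FDB walk below runs with fdb_tbl_lock held, so
 * the callback must not sleep.
 */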
Simon Hormane5054642015-05-25 14:28:36 +09004578static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004579 struct switchdev_obj *obj)
4580{
4581 struct rocker *rocker = rocker_port->rocker;
4582 struct switchdev_obj_fdb *fdb = &obj->u.fdb;
4583 struct rocker_fdb_tbl_entry *found;
4584 struct hlist_node *tmp;
4585 unsigned long lock_flags;
4586 int bkt;
4587 int err = 0;
4588
4589 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4590 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
Scott Feldman4c660492015-09-23 08:39:15 -07004591 if (found->key.rocker_port != rocker_port)
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004592 continue;
David S. Millercdf09692015-08-11 12:00:37 -07004593 fdb->addr = found->key.addr;
Vivien Didelotce80e7b2015-08-10 09:09:52 -04004594 fdb->ndm_state = NUD_REACHABLE;
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004595 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4596 found->key.vlan_id);
4597 err = obj->cb(rocker_port->dev, obj);
4598 if (err)
4599 break;
4600 }
4601 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4602
4603 return err;
4604}
4605
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004606static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4607 struct switchdev_obj *obj)
4608{
4609 struct switchdev_obj_vlan *vlan = &obj->u.vlan;
4610 u16 vid;
4611 int err = 0;
4612
4613 for (vid = 1; vid < VLAN_N_VID; vid++) {
4614 if (!test_bit(vid, rocker_port->vlan_bitmap))
4615 continue;
4616 vlan->flags = 0;
4617 if (rocker_vlan_id_is_internal(htons(vid)))
4618 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4619 vlan->vid_begin = vlan->vid_end = vid;
4620 err = obj->cb(rocker_port->dev, obj);
4621 if (err)
4622 break;
4623 }
4624
4625 return err;
4626}
4627
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004628static int rocker_port_obj_dump(struct net_device *dev,
4629 struct switchdev_obj *obj)
4630{
Simon Hormane5054642015-05-25 14:28:36 +09004631 const struct rocker_port *rocker_port = netdev_priv(dev);
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004632 int err = 0;
4633
4634 switch (obj->id) {
4635 case SWITCHDEV_OBJ_PORT_FDB:
4636 err = rocker_port_fdb_dump(rocker_port, obj);
4637 break;
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004638 case SWITCHDEV_OBJ_PORT_VLAN:
4639 err = rocker_port_vlan_dump(rocker_port, obj);
4640 break;
Scott Feldman9228ad22015-05-10 09:47:54 -07004641 default:
4642 err = -EOPNOTSUPP;
4643 break;
4644 }
4645
4646 return err;
4647}
4648
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07004649static const struct switchdev_ops rocker_port_switchdev_ops = {
Scott Feldmanf8e20a92015-05-10 09:47:49 -07004650 .switchdev_port_attr_get = rocker_port_attr_get,
Scott Feldman35636062015-05-10 09:47:51 -07004651 .switchdev_port_attr_set = rocker_port_attr_set,
Scott Feldman9228ad22015-05-10 09:47:54 -07004652 .switchdev_port_obj_add = rocker_port_obj_add,
4653 .switchdev_port_obj_del = rocker_port_obj_del,
Samudrala, Sridhar45d41222015-05-13 21:55:43 -07004654 .switchdev_port_obj_dump = rocker_port_obj_dump,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004655};
4656
4657/********************
4658 * ethtool interface
4659 ********************/
4660
4661static int rocker_port_get_settings(struct net_device *dev,
4662 struct ethtool_cmd *ecmd)
4663{
4664 struct rocker_port *rocker_port = netdev_priv(dev);
4665
4666 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4667}
4668
4669static int rocker_port_set_settings(struct net_device *dev,
4670 struct ethtool_cmd *ecmd)
4671{
4672 struct rocker_port *rocker_port = netdev_priv(dev);
4673
4674 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4675}
4676
4677static void rocker_port_get_drvinfo(struct net_device *dev,
4678 struct ethtool_drvinfo *drvinfo)
4679{
4680 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4681 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4682}
4683
David Ahern9766e972015-01-29 20:59:33 -07004684static struct rocker_port_stats {
4685 char str[ETH_GSTRING_LEN];
4686 int type;
4687} rocker_port_stats[] = {
4688 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4689 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4690 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4691 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4692
4693 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4694 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4695 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4696 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
4697};
4698
4699#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
4700
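/* The table above is what "ethtool -S <port>" ultimately shows: the
 * string set below supplies the row names, and one GET_PORT_STATS
 * command round trip fills in the values, one u64 per row, in table
 * order.  A failed round trip zeroes the counters rather than failing
 * the ethtool call.
 */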
4701static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4702 u8 *data)
4703{
4704 u8 *p = data;
4705 int i;
4706
4707 switch (stringset) {
4708 case ETH_SS_STATS:
4709 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4710 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4711 p += ETH_GSTRING_LEN;
4712 }
4713 break;
4714 }
4715}
4716
4717static int
Simon Horman534ba6a2015-06-01 13:25:04 +09004718rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
David Ahern9766e972015-01-29 20:59:33 -07004719 struct rocker_desc_info *desc_info,
4720 void *priv)
4721{
4722 struct rocker_tlv *cmd_stats;
4723
4724 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4725 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4726 return -EMSGSIZE;
4727
4728 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4729 if (!cmd_stats)
4730 return -EMSGSIZE;
4731
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004732 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4733 rocker_port->pport))
David Ahern9766e972015-01-29 20:59:33 -07004734 return -EMSGSIZE;
4735
4736 rocker_tlv_nest_end(desc_info, cmd_stats);
4737
4738 return 0;
4739}
4740
4741static int
Simon Horman534ba6a2015-06-01 13:25:04 +09004742rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
Simon Hormane5054642015-05-25 14:28:36 +09004743 const struct rocker_desc_info *desc_info,
David Ahern9766e972015-01-29 20:59:33 -07004744 void *priv)
4745{
Simon Hormane5054642015-05-25 14:28:36 +09004746 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4747 const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4748 const struct rocker_tlv *pattr;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004749 u32 pport;
David Ahern9766e972015-01-29 20:59:33 -07004750 u64 *data = priv;
4751 int i;
4752
4753 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4754
4755 if (!attrs[ROCKER_TLV_CMD_INFO])
4756 return -EIO;
4757
4758 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4759 attrs[ROCKER_TLV_CMD_INFO]);
4760
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004761 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
David Ahern9766e972015-01-29 20:59:33 -07004762 return -EIO;
4763
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004764 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4765 if (pport != rocker_port->pport)
David Ahern9766e972015-01-29 20:59:33 -07004766 return -EIO;
4767
4768 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4769 pattr = stats_attrs[rocker_port_stats[i].type];
4770 if (!pattr)
4771 continue;
4772
4773 data[i] = rocker_tlv_get_u64(pattr);
4774 }
4775
4776 return 0;
4777}
4778
4779static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4780 void *priv)
4781{
Jiri Pirko76c6f942015-09-24 10:02:44 +02004782 return rocker_cmd_exec(rocker_port, NULL, 0,
David Ahern9766e972015-01-29 20:59:33 -07004783 rocker_cmd_get_port_stats_prep, NULL,
4784 rocker_cmd_get_port_stats_ethtool_proc,
Scott Feldmanc4f20322015-05-10 09:47:50 -07004785 priv);
David Ahern9766e972015-01-29 20:59:33 -07004786}
4787
4788static void rocker_port_get_stats(struct net_device *dev,
4789 struct ethtool_stats *stats, u64 *data)
4790{
4791 struct rocker_port *rocker_port = netdev_priv(dev);
4792
4793 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4794 int i;
4795
4796 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4797 data[i] = 0;
4798 }
David Ahern9766e972015-01-29 20:59:33 -07004799}
4800
4801static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4802{
4803 switch (sset) {
4804 case ETH_SS_STATS:
4805 return ROCKER_PORT_STATS_LEN;
4806 default:
4807 return -EOPNOTSUPP;
4808 }
4809}
4810
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004811static const struct ethtool_ops rocker_port_ethtool_ops = {
4812 .get_settings = rocker_port_get_settings,
4813 .set_settings = rocker_port_set_settings,
4814 .get_drvinfo = rocker_port_get_drvinfo,
4815 .get_link = ethtool_op_get_link,
David Ahern9766e972015-01-29 20:59:33 -07004816 .get_strings = rocker_port_get_strings,
4817 .get_ethtool_stats = rocker_port_get_stats,
4818 .get_sset_count = rocker_port_get_sset_count,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004819};
4820
4821/*****************
4822 * NAPI interface
4823 *****************/
4824
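/* Completion handling is credit based: each descriptor reaped in a
 * poll cycle is one credit, and the credits are handed back to the
 * device with rocker_dma_ring_credits_set() so it can post that many
 * new descriptors.  TX polling additionally restarts the egress queue
 * that rocker_port_xmit() stopped when the ring filled up.
 */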
4825static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4826{
4827 return container_of(napi, struct rocker_port, napi_tx);
4828}
4829
4830static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4831{
4832 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
Simon Hormane5054642015-05-25 14:28:36 +09004833 const struct rocker *rocker = rocker_port->rocker;
4834 const struct rocker_desc_info *desc_info;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004835 u32 credits = 0;
4836 int err;
4837
4838 /* Cleanup tx descriptors */
4839 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
David Ahernf2bbca52015-01-16 14:22:29 -07004840 struct sk_buff *skb;
4841
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004842 err = rocker_desc_err(desc_info);
4843 if (err && net_ratelimit())
4844 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4845 err);
4846 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
David Ahernf2bbca52015-01-16 14:22:29 -07004847
4848 skb = rocker_desc_cookie_ptr_get(desc_info);
4849 if (err == 0) {
4850 rocker_port->dev->stats.tx_packets++;
4851 rocker_port->dev->stats.tx_bytes += skb->len;
Scott Feldman4725ceb2015-05-10 09:48:08 -07004852 } else {
David Ahernf2bbca52015-01-16 14:22:29 -07004853 rocker_port->dev->stats.tx_errors++;
Scott Feldman4725ceb2015-05-10 09:48:08 -07004854 }
David Ahernf2bbca52015-01-16 14:22:29 -07004855
4856 dev_kfree_skb_any(skb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004857 credits++;
4858 }
4859
4860 if (credits && netif_queue_stopped(rocker_port->dev))
4861 netif_wake_queue(rocker_port->dev);
4862
4863 napi_complete(napi);
4864 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4865
4866 return 0;
4867}
4868
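/* RX completion hands back the skb that was pre-posted for the ring
 * slot.  If the device flags the packet with FWD_OFFLOAD it was
 * already forwarded in hardware, and setting skb->offload_fwd_mark
 * lets the software bridge avoid duplicating that forwarding.  A fresh
 * skb is posted to the slot before the descriptor is recycled.
 */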
Simon Hormane5054642015-05-25 14:28:36 +09004869static int rocker_port_rx_proc(const struct rocker *rocker,
4870 const struct rocker_port *rocker_port,
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004871 struct rocker_desc_info *desc_info)
4872{
Simon Hormane5054642015-05-25 14:28:36 +09004873 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004874 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4875 size_t rx_len;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07004876 u16 rx_flags = 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004877
4878 if (!skb)
4879 return -ENOENT;
4880
4881 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4882 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4883 return -EINVAL;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07004884 if (attrs[ROCKER_TLV_RX_FLAGS])
4885 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004886
4887 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4888
4889 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4890 skb_put(skb, rx_len);
4891 skb->protocol = eth_type_trans(skb, rocker_port->dev);
David Ahernf2bbca52015-01-16 14:22:29 -07004892
Scott Feldman3f98a8e2015-07-18 18:24:51 -07004893 if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
4894 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
4895
David Ahernf2bbca52015-01-16 14:22:29 -07004896 rocker_port->dev->stats.rx_packets++;
4897 rocker_port->dev->stats.rx_bytes += skb->len;
4898
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004899 netif_receive_skb(skb);
4900
Simon Horman534ba6a2015-06-01 13:25:04 +09004901 return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004902}
4903
4904static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4905{
4906 return container_of(napi, struct rocker_port, napi_rx);
4907}
4908
4909static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4910{
4911 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
Simon Hormane5054642015-05-25 14:28:36 +09004912 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004913 struct rocker_desc_info *desc_info;
4914 u32 credits = 0;
4915 int err;
4916
4917 /* Process rx descriptors */
4918 while (credits < budget &&
4919 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4920 err = rocker_desc_err(desc_info);
4921 if (err) {
4922 if (net_ratelimit())
4923 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4924 err);
4925 } else {
4926 err = rocker_port_rx_proc(rocker, rocker_port,
4927 desc_info);
4928 if (err && net_ratelimit())
4929 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4930 err);
4931 }
David Ahernf2bbca52015-01-16 14:22:29 -07004932 if (err)
4933 rocker_port->dev->stats.rx_errors++;
4934
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004935 rocker_desc_gen_clear(desc_info);
4936 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4937 credits++;
4938 }
4939
4940 if (credits < budget)
4941 napi_complete(napi);
4942
4943 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
4944
4945 return credits;
4946}
4947
4948/*****************
4949 * PCI driver ops
4950 *****************/
4951
Simon Hormane5054642015-05-25 14:28:36 +09004952static void rocker_carrier_init(const struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004953{
Simon Hormane5054642015-05-25 14:28:36 +09004954 const struct rocker *rocker = rocker_port->rocker;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004955 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4956 bool link_up;
4957
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08004958 link_up = link_status & (1 << rocker_port->pport);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004959 if (link_up)
4960 netif_carrier_on(rocker_port->dev);
4961 else
4962 netif_carrier_off(rocker_port->dev);
4963}
4964
Simon Hormane5054642015-05-25 14:28:36 +09004965static void rocker_remove_ports(const struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004966{
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004967 struct rocker_port *rocker_port;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004968 int i;
4969
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004970 for (i = 0; i < rocker->port_count; i++) {
4971 rocker_port = rocker->ports[i];
Scott Feldmana0720312015-06-12 21:09:44 -07004972 if (!rocker_port)
4973 continue;
Jiri Pirko76c6f942015-09-24 10:02:44 +02004974 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004975 unregister_netdev(rocker_port->dev);
Ido Schimmel1ebd47e2015-08-02 19:29:16 +02004976 free_netdev(rocker_port->dev);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01004977 }
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004978 kfree(rocker->ports);
4979}
4980
Simon Horman534ba6a2015-06-01 13:25:04 +09004981static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004982{
Simon Horman534ba6a2015-06-01 13:25:04 +09004983 const struct rocker *rocker = rocker_port->rocker;
Simon Hormane5054642015-05-25 14:28:36 +09004984 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004985 int err;
4986
4987 err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4988 rocker_port->dev->dev_addr);
4989 if (err) {
4990 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4991 eth_hw_addr_random(rocker_port->dev);
4992 }
4993}
4994
4995static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4996{
Simon Hormane5054642015-05-25 14:28:36 +09004997 const struct pci_dev *pdev = rocker->pdev;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01004998 struct rocker_port *rocker_port;
4999 struct net_device *dev;
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005000 u16 untagged_vid = 0;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005001 int err;
5002
5003 dev = alloc_etherdev(sizeof(struct rocker_port));
5004 if (!dev)
5005 return -ENOMEM;
5006 rocker_port = netdev_priv(dev);
5007 rocker_port->dev = dev;
5008 rocker_port->rocker = rocker;
5009 rocker_port->port_number = port_number;
Scott Feldman4a6bb6d2015-02-25 20:15:37 -08005010 rocker_port->pport = port_number + 1;
Scott Feldman5111f802014-11-28 14:34:30 +01005011 rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
Scott Feldmane7335702015-09-23 08:39:17 -07005012 rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
Scott Feldmanc4f20322015-05-10 09:47:50 -07005013 INIT_LIST_HEAD(&rocker_port->trans_mem);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005014
Simon Horman534ba6a2015-06-01 13:25:04 +09005015 rocker_port_dev_addr_init(rocker_port);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005016 dev->netdev_ops = &rocker_port_netdev_ops;
5017 dev->ethtool_ops = &rocker_port_ethtool_ops;
Jiri Pirko9d47c0a2015-05-10 09:47:47 -07005018 dev->switchdev_ops = &rocker_port_switchdev_ops;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005019 netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
5020 NAPI_POLL_WEIGHT);
5021 netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
5022 NAPI_POLL_WEIGHT);
5023 rocker_carrier_init(rocker_port);
5024
Ido Schimmel21518a62015-08-02 20:56:37 +02005025 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005026
5027 err = register_netdev(dev);
5028 if (err) {
5029 dev_err(&pdev->dev, "register_netdev failed\n");
5030 goto err_register_netdev;
5031 }
5032 rocker->ports[port_number] = rocker_port;
5033
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005034 switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
5035
Jiri Pirko76c6f942015-09-24 10:02:44 +02005036 rocker_port_set_learning(rocker_port, NULL);
Scott Feldman5111f802014-11-28 14:34:30 +01005037
Jiri Pirko76c6f942015-09-24 10:02:44 +02005038 err = rocker_port_ig_tbl(rocker_port, NULL, 0);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005039 if (err) {
Scott Feldmanff147022015-08-03 22:31:18 -07005040 netdev_err(rocker_port->dev, "install ig port table failed\n");
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005041 goto err_port_ig_tbl;
5042 }
5043
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005044 rocker_port->internal_vlan_id =
5045 rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
5046
Jiri Pirko76c6f942015-09-24 10:02:44 +02005047 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005048 if (err) {
5049 netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
5050 goto err_untagged_vlan;
5051 }
5052
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005053 return 0;
5054
Scott Feldmanbcfd7802015-06-01 11:39:04 -07005055err_untagged_vlan:
Jiri Pirko76c6f942015-09-24 10:02:44 +02005056 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005057err_port_ig_tbl:
Scott Feldman6c4f7782015-08-03 22:31:17 -07005058 rocker->ports[port_number] = NULL;
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005059 unregister_netdev(dev);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005060err_register_netdev:
5061 free_netdev(dev);
5062 return err;
5063}
5064
5065static int rocker_probe_ports(struct rocker *rocker)
5066{
5067 int i;
5068 size_t alloc_size;
5069 int err;
5070
5071 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
Scott Feldman27b808c2015-06-01 11:39:02 -07005072 rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
Dan Carpentere65ad3b2015-02-25 16:35:32 +03005073 if (!rocker->ports)
5074 return -ENOMEM;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005075 for (i = 0; i < rocker->port_count; i++) {
5076 err = rocker_probe_port(rocker, i);
5077 if (err)
5078 goto remove_ports;
5079 }
5080 return 0;
5081
5082remove_ports:
5083 rocker_remove_ports(rocker);
5084 return err;
5085}
5086
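/* MSI-X layout (see the ROCKER_MSIX_VEC_* macros in rocker.h): one
 * vector for the command ring, one for the event ring, then a TX/RX
 * pair per port.  pci_enable_msix_exact() is used because the driver
 * has no fallback mode with fewer vectors.
 */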
5087static int rocker_msix_init(struct rocker *rocker)
5088{
5089 struct pci_dev *pdev = rocker->pdev;
5090 int msix_entries;
5091 int i;
5092 int err;
5093
5094 msix_entries = pci_msix_vec_count(pdev);
5095 if (msix_entries < 0)
5096 return msix_entries;
5097
5098 if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
5099 return -EINVAL;
5100
5101 rocker->msix_entries = kmalloc_array(msix_entries,
5102 sizeof(struct msix_entry),
5103 GFP_KERNEL);
5104 if (!rocker->msix_entries)
5105 return -ENOMEM;
5106
5107 for (i = 0; i < msix_entries; i++)
5108 rocker->msix_entries[i].entry = i;
5109
5110 err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
5111 if (err < 0)
5112 goto err_enable_msix;
5113
5114 return 0;
5115
5116err_enable_msix:
5117 kfree(rocker->msix_entries);
5118 return err;
5119}
5120
Simon Hormane5054642015-05-25 14:28:36 +09005121static void rocker_msix_fini(const struct rocker *rocker)
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005122{
5123 pci_disable_msix(rocker->pdev);
5124 kfree(rocker->msix_entries);
5125}
5126
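/* Bring-up order in rocker_probe() matters: BAR0 must be mapped before
 * the port count and switch ID registers can be read, MSI-X must be up
 * before the command/event rings are armed, and ports are probed last
 * because each port immediately issues commands on the command ring.
 * rocker_remove() and the error unwind mirror the order in reverse.
 */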
5127static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5128{
5129 struct rocker *rocker;
5130 int err;
5131
5132 rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
5133 if (!rocker)
5134 return -ENOMEM;
5135
5136 err = pci_enable_device(pdev);
5137 if (err) {
5138 dev_err(&pdev->dev, "pci_enable_device failed\n");
5139 goto err_pci_enable_device;
5140 }
5141
5142 err = pci_request_regions(pdev, rocker_driver_name);
5143 if (err) {
5144 dev_err(&pdev->dev, "pci_request_regions failed\n");
5145 goto err_pci_request_regions;
5146 }
5147
5148 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5149 if (!err) {
5150 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5151 if (err) {
5152 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
5153 goto err_pci_set_dma_mask;
5154 }
5155 } else {
5156 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5157 if (err) {
5158 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
5159 goto err_pci_set_dma_mask;
5160 }
5161 }
5162
5163 if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
5164 dev_err(&pdev->dev, "invalid PCI region size\n");
Wei Yongjun3122a922015-04-16 20:21:02 +08005165 err = -EINVAL;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005166 goto err_pci_resource_len_check;
5167 }
5168
5169 rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
5170 pci_resource_len(pdev, 0));
5171 if (!rocker->hw_addr) {
5172 dev_err(&pdev->dev, "ioremap failed\n");
5173 err = -EIO;
5174 goto err_ioremap;
5175 }
5176 pci_set_master(pdev);
5177
5178 rocker->pdev = pdev;
5179 pci_set_drvdata(pdev, rocker);
5180
5181 rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
5182
5183 err = rocker_msix_init(rocker);
5184 if (err) {
5185 dev_err(&pdev->dev, "MSI-X init failed\n");
5186 goto err_msix_init;
5187 }
5188
5189 err = rocker_basic_hw_test(rocker);
5190 if (err) {
5191 dev_err(&pdev->dev, "basic hw test failed\n");
5192 goto err_basic_hw_test;
5193 }
5194
5195 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5196
5197 err = rocker_dma_rings_init(rocker);
5198 if (err)
5199 goto err_dma_rings_init;
5200
5201 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
5202 rocker_cmd_irq_handler, 0,
5203 rocker_driver_name, rocker);
5204 if (err) {
5205 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5206 goto err_request_cmd_irq;
5207 }
5208
5209 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5210 rocker_event_irq_handler, 0,
5211 rocker_driver_name, rocker);
5212 if (err) {
5213 dev_err(&pdev->dev, "cannot assign event irq\n");
5214 goto err_request_event_irq;
5215 }
5216
5217 rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5218
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005219 err = rocker_init_tbls(rocker);
5220 if (err) {
5221 dev_err(&pdev->dev, "cannot init rocker tables\n");
5222 goto err_init_tbls;
5223 }
5224
Scott Feldman52fe3e22015-09-23 08:39:18 -07005225 setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
5226 (unsigned long) rocker);
5227 mod_timer(&rocker->fdb_cleanup_timer, jiffies);
5228
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005229 err = rocker_probe_ports(rocker);
5230 if (err) {
5231 dev_err(&pdev->dev, "failed to probe ports\n");
5232 goto err_probe_ports;
5233 }
5234
Scott Feldmanc8beb5b2015-08-12 18:44:13 -07005235 dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5236 (int)sizeof(rocker->hw.id), &rocker->hw.id);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005237
5238 return 0;
5239
5240err_probe_ports:
Scott Feldman52fe3e22015-09-23 08:39:18 -07005241 del_timer_sync(&rocker->fdb_cleanup_timer);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005242 rocker_free_tbls(rocker);
5243err_init_tbls:
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005244 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5245err_request_event_irq:
5246 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5247err_request_cmd_irq:
5248 rocker_dma_rings_fini(rocker);
5249err_dma_rings_init:
5250err_basic_hw_test:
5251 rocker_msix_fini(rocker);
5252err_msix_init:
5253 iounmap(rocker->hw_addr);
5254err_ioremap:
5255err_pci_resource_len_check:
5256err_pci_set_dma_mask:
5257 pci_release_regions(pdev);
5258err_pci_request_regions:
5259 pci_disable_device(pdev);
5260err_pci_enable_device:
5261 kfree(rocker);
5262 return err;
5263}
5264
5265static void rocker_remove(struct pci_dev *pdev)
5266{
5267 struct rocker *rocker = pci_get_drvdata(pdev);
5268
Scott Feldman52fe3e22015-09-23 08:39:18 -07005269 del_timer_sync(&rocker->fdb_cleanup_timer);
Scott Feldman9f6bbf72014-11-28 14:34:27 +01005270 rocker_free_tbls(rocker);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005271 rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5272 rocker_remove_ports(rocker);
5273 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5274 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5275 rocker_dma_rings_fini(rocker);
5276 rocker_msix_fini(rocker);
5277 iounmap(rocker->hw_addr);
5278 pci_release_regions(rocker->pdev);
5279 pci_disable_device(rocker->pdev);
5280 kfree(rocker);
5281}
5282
5283static struct pci_driver rocker_pci_driver = {
5284 .name = rocker_driver_name,
5285 .id_table = rocker_pci_id_table,
5286 .probe = rocker_probe,
5287 .remove = rocker_remove,
5288};
5289
Scott Feldman6c707942014-11-28 14:34:28 +01005290/************************************
5291 * Net device notifier event handler
5292 ************************************/
5293
Simon Hormane5054642015-05-25 14:28:36 +09005294static bool rocker_port_dev_check(const struct net_device *dev)
Scott Feldman6c707942014-11-28 14:34:28 +01005295{
5296 return dev->netdev_ops == &rocker_port_netdev_ops;
5297}
5298
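/* Rocker models untagged traffic with per-ifindex internal VLANs.
 * Joining a bridge therefore means migrating the port from its own
 * internal VLAN to the bridge's internal VLAN, and leaving means
 * migrating back; both paths below remove the untagged VLAN, swap
 * internal_vlan_id, and re-add the untagged VLAN.
 */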
5299static int rocker_port_bridge_join(struct rocker_port *rocker_port,
5300 struct net_device *bridge)
5301{
Scott Feldman027e00d2015-06-01 11:39:05 -07005302 u16 untagged_vid = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01005303 int err;
5304
Scott Feldman027e00d2015-06-01 11:39:05 -07005305	/* The port is joining the bridge, so its internal VLAN is
5306	 * going to change to the bridge's internal VLAN.  Remove the
5307	 * untagged VLAN (vid=0) from the port now and re-add it once
5308	 * the internal VLAN has changed.
5309	 */
5310
5311 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5312 if (err)
5313 return err;
5314
Simon Hormandf6a2062015-05-21 12:40:17 +09005315 rocker_port_internal_vlan_id_put(rocker_port,
Scott Feldman6c707942014-11-28 14:34:28 +01005316 rocker_port->dev->ifindex);
Scott Feldman027e00d2015-06-01 11:39:05 -07005317 rocker_port->internal_vlan_id =
5318 rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
Scott Feldman6c707942014-11-28 14:34:28 +01005319
5320 rocker_port->bridge_dev = bridge;
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005321 switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
Scott Feldman6c707942014-11-28 14:34:28 +01005322
Jiri Pirko76c6f942015-09-24 10:02:44 +02005323 return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01005324}
5325
5326static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
5327{
Scott Feldman027e00d2015-06-01 11:39:05 -07005328 u16 untagged_vid = 0;
Scott Feldman6c707942014-11-28 14:34:28 +01005329 int err;
5330
Scott Feldman027e00d2015-06-01 11:39:05 -07005331 err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
5332 if (err)
5333 return err;
5334
Simon Hormandf6a2062015-05-21 12:40:17 +09005335 rocker_port_internal_vlan_id_put(rocker_port,
Scott Feldman6c707942014-11-28 14:34:28 +01005336 rocker_port->bridge_dev->ifindex);
Scott Feldman6c707942014-11-28 14:34:28 +01005337 rocker_port->internal_vlan_id =
5338 rocker_port_internal_vlan_id_get(rocker_port,
5339 rocker_port->dev->ifindex);
Scott Feldman027e00d2015-06-01 11:39:05 -07005340
Scott Feldman3f98a8e2015-07-18 18:24:51 -07005341 switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
5342 false);
Scott Feldman027e00d2015-06-01 11:39:05 -07005343 rocker_port->bridge_dev = NULL;
5344
Jiri Pirko76c6f942015-09-24 10:02:44 +02005345 err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
Scott Feldmane47172a2015-02-25 20:15:38 -08005346 if (err)
5347 return err;
5348
5349 if (rocker_port->dev->flags & IFF_UP)
Jiri Pirko76c6f942015-09-24 10:02:44 +02005350 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
Scott Feldman6c707942014-11-28 14:34:28 +01005351
5352 return err;
5353}
5354
5356static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5357 struct net_device *master)
5358{
5359 int err;
5360
5361 rocker_port->bridge_dev = master;
5362
Jiri Pirko76c6f942015-09-24 10:02:44 +02005363 err = rocker_port_fwd_disable(rocker_port, NULL, 0);
Simon Horman82549732015-07-16 10:39:14 +09005364 if (err)
5365 return err;
Jiri Pirko76c6f942015-09-24 10:02:44 +02005366 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
Simon Horman82549732015-07-16 10:39:14 +09005367
5368 return err;
5369}
5370
Jiri Pirko686ed302015-08-27 09:31:23 +02005371static int rocker_port_master_linked(struct rocker_port *rocker_port,
5372 struct net_device *master)
Scott Feldman6c707942014-11-28 14:34:28 +01005373{
Scott Feldman6c707942014-11-28 14:34:28 +01005374 int err = 0;
5375
Jiri Pirko686ed302015-08-27 09:31:23 +02005376 if (netif_is_bridge_master(master))
5377 err = rocker_port_bridge_join(rocker_port, master);
5378 else if (netif_is_ovs_master(master))
5379 err = rocker_port_ovs_changed(rocker_port, master);
5380 return err;
5381}
Scott Feldman6c707942014-11-28 14:34:28 +01005382
Jiri Pirko686ed302015-08-27 09:31:23 +02005383static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
5384{
5385 int err = 0;
5386
5387 if (rocker_port_is_bridged(rocker_port))
5388 err = rocker_port_bridge_leave(rocker_port);
5389 else if (rocker_port_is_ovsed(rocker_port))
5390 err = rocker_port_ovs_changed(rocker_port, NULL);
Scott Feldman6c707942014-11-28 14:34:28 +01005391 return err;
5392}
5393
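/* The notifier fires for every netdev in the system, so anything that
 * is not a rocker port is filtered out first.  Only NETDEV_CHANGEUPPER
 * with a master (bridge or OVS) is acted on, and failures are only
 * logged: the notifier runs after the upper-device change is already a
 * fact and cannot veto it here.
 */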
5394static int rocker_netdevice_event(struct notifier_block *unused,
5395 unsigned long event, void *ptr)
5396{
Jiri Pirko686ed302015-08-27 09:31:23 +02005397 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5398 struct netdev_notifier_changeupper_info *info;
5399 struct rocker_port *rocker_port;
Scott Feldman6c707942014-11-28 14:34:28 +01005400 int err;
5401
Jiri Pirko686ed302015-08-27 09:31:23 +02005402 if (!rocker_port_dev_check(dev))
5403 return NOTIFY_DONE;
5404
Scott Feldman6c707942014-11-28 14:34:28 +01005405 switch (event) {
5406 case NETDEV_CHANGEUPPER:
Jiri Pirko686ed302015-08-27 09:31:23 +02005407 info = ptr;
5408 if (!info->master)
5409 goto out;
5410 rocker_port = netdev_priv(dev);
5411 if (info->linking) {
5412 err = rocker_port_master_linked(rocker_port,
5413 info->upper_dev);
5414 if (err)
5415				netdev_warn(dev, "failed to handle master linking (err %d)\n",
5416 err);
5417 } else {
5418 err = rocker_port_master_unlinked(rocker_port);
5419 if (err)
5420				netdev_warn(dev, "failed to handle master unlinking (err %d)\n",
5421 err);
5422 }
Scott Feldman6c707942014-11-28 14:34:28 +01005423 break;
5424 }
Jiri Pirko686ed302015-08-27 09:31:23 +02005425out:
Scott Feldman6c707942014-11-28 14:34:28 +01005426 return NOTIFY_DONE;
5427}
5428
5429static struct notifier_block rocker_netdevice_nb __read_mostly = {
5430 .notifier_call = rocker_netdevice_event,
5431};
5432
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005433/************************************
5434 * Net event notifier event handler
5435 ************************************/
5436
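/* ARP table updates feed the hardware L3 neighbour entries: a
 * NUD_VALID neighbour is (re)installed and anything else is removed.
 * Both are issued NOWAIT because netevent notifiers may be called in
 * atomic context.
 */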
5437static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
5438{
5439 struct rocker_port *rocker_port = netdev_priv(dev);
Scott Feldman02a9fbf2015-06-12 21:35:47 -07005440 int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
5441 ROCKER_OP_FLAG_NOWAIT;
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005442 __be32 ip_addr = *(__be32 *)n->primary_key;
5443
Jiri Pirko76c6f942015-09-24 10:02:44 +02005444 return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005445}
5446
5447static int rocker_netevent_event(struct notifier_block *unused,
5448 unsigned long event, void *ptr)
5449{
5450 struct net_device *dev;
5451 struct neighbour *n = ptr;
5452 int err;
5453
5454 switch (event) {
5455 case NETEVENT_NEIGH_UPDATE:
5456 if (n->tbl != &arp_tbl)
5457 return NOTIFY_DONE;
5458 dev = n->dev;
5459 if (!rocker_port_dev_check(dev))
5460 return NOTIFY_DONE;
5461 err = rocker_neigh_update(dev, n);
5462 if (err)
5463 netdev_warn(dev,
5464 "failed to handle neigh update (err %d)\n",
5465 err);
5466 break;
5467 }
5468
5469 return NOTIFY_DONE;
5470}
5471
5472static struct notifier_block rocker_netevent_nb __read_mostly = {
5473 .notifier_call = rocker_netevent_event,
5474};
5475
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005476/***********************
5477 * Module init and exit
5478 ***********************/
5479
5480static int __init rocker_module_init(void)
5481{
Scott Feldman6c707942014-11-28 14:34:28 +01005482 int err;
5483
5484 register_netdevice_notifier(&rocker_netdevice_nb);
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005485 register_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005486 err = pci_register_driver(&rocker_pci_driver);
5487 if (err)
5488 goto err_pci_register_driver;
5489 return 0;
5490
5491err_pci_register_driver:
Gilad Ben-Yossefa076e6b2015-06-23 10:52:10 +03005492 unregister_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005493 unregister_netdevice_notifier(&rocker_netdevice_nb);
5494 return err;
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005495}
5496
5497static void __exit rocker_module_exit(void)
5498{
Scott Feldmanc1beeef2015-03-05 21:21:20 -08005499 unregister_netevent_notifier(&rocker_netevent_nb);
Scott Feldman6c707942014-11-28 14:34:28 +01005500 unregister_netdevice_notifier(&rocker_netdevice_nb);
Jiri Pirko4b8ac962014-11-28 14:34:26 +01005501 pci_unregister_driver(&rocker_pci_driver);
5502}
5503
5504module_init(rocker_module_init);
5505module_exit(rocker_module_exit);
5506
5507MODULE_LICENSE("GPL v2");
5508MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5509MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5510MODULE_DESCRIPTION("Rocker switch device driver");
5511MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);